# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates.attributes import (
CartesianRepresentationAttribute,
TimeAttribute,
)
from astropy.coordinates.baseframe import base_doc
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame, doc_components
from .utils import DEFAULT_OBSTIME, EQUINOX_J2000
__all__ = ["GCRS", "PrecessedGeocentric"]
doc_footer_gcrs = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth.
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
The position of the observer relative to the center-of-mass of the
Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
`~astropy.coordinates.CartesianRepresentation`, or proper input for one,
i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units.
Defaults to [0, 0, 0], meaning "true" GCRS.
obsgeovel : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
The velocity of the observer relative to the center-of-mass of the
Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
`~astropy.coordinates.CartesianRepresentation`, or proper input for one,
i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity
units. Defaults to [0, 0, 0], meaning "true" GCRS.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_gcrs)
class GCRS(BaseRADecFrame):
"""
A coordinate or frame in the Geocentric Celestial Reference System (GCRS).
    GCRS is distinct from ICRS mainly in that it is relative to the Earth's
    center-of-mass rather than the solar system barycenter. That means this
frame includes the effects of aberration (unlike ICRS). For more background
on the GCRS, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation. (Of
particular note is Section 1.2 of
`USNO Circular 179 <https://arxiv.org/abs/astro-ph/0602086>`_)
This frame also includes frames that are defined *relative* to the Earth,
but that are offset (in both position and velocity) from the Earth.
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
obsgeoloc = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m)
obsgeovel = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m / u.s)
# The "self-transform" is defined in icrs_cirs_transformations.py, because in
# the current implementation it goes through ICRS (like CIRS)
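# Illustrative sketch (an editorial addition, not part of the astropy API):
# constructing a GCRS frame for an observer offset from the geocenter, e.g. a
# satellite in low Earth orbit. All numeric values are made up.
def _example_gcrs_for_offset_observer():
    from astropy.coordinates import CartesianRepresentation
    from astropy.time import Time

    return GCRS(
        ra=10 * u.deg,
        dec=20 * u.deg,
        obstime=Time("2020-01-01"),
        obsgeoloc=CartesianRepresentation([7000, 0, 0] * u.km),
        obsgeovel=CartesianRepresentation([0, 7.5, 0] * u.km / u.s),
    )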
doc_footer_prec_geo = """
Other parameters
----------------
equinox : `~astropy.time.Time`
The (mean) equinox to precess the coordinates to.
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth.
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
The position of the observer relative to the center-of-mass of the
Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
`~astropy.coordinates.CartesianRepresentation`, or proper input for one,
i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units.
Defaults to [0, 0, 0], meaning "true" Geocentric.
obsgeovel : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
The velocity of the observer relative to the center-of-mass of the
        Earth, oriented the same as BCRS/ICRS. Either [0, 0, 0],
`~astropy.coordinates.CartesianRepresentation`, or proper input for one,
i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity
units. Defaults to [0, 0, 0], meaning "true" Geocentric.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_prec_geo)
class PrecessedGeocentric(BaseRADecFrame):
"""
A coordinate frame defined in a similar manner as GCRS, but precessed to a
requested (mean) equinox. Note that this does *not* end up the same as
regular GCRS even for J2000 equinox, because the GCRS orientation is fixed
to that of ICRS, which is not quite the same as the dynamical J2000
orientation.
    The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
obsgeoloc = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m)
obsgeovel = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m / u.s)
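# Illustrative sketch (editorial addition): precessing a GCRS coordinate to
# the J2050 mean equinox. The transformation runs through ICRS, as noted
# above; the input values are arbitrary.
def _example_precess_to_j2050():
    from astropy.coordinates import SkyCoord

    gcrs = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, frame=GCRS())
    return gcrs.transform_to(PrecessedGeocentric(equinox="J2050"))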
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import QuantityAttribute, TimeAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.utils.decorators import format_doc
from .utils import DEFAULT_OBSTIME, EQUINOX_J2000
__all__ = [
"GeocentricMeanEcliptic",
"BarycentricMeanEcliptic",
"HeliocentricMeanEcliptic",
"BaseEclipticFrame",
"GeocentricTrueEcliptic",
"BarycentricTrueEcliptic",
"HeliocentricTrueEcliptic",
"HeliocentricEclipticIAU76",
"CustomBarycentricEcliptic",
]
doc_components_ecl = """
lon : `~astropy.coordinates.Angle`, optional, keyword-only
The ecliptic longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat : `~astropy.coordinates.Angle`, optional, keyword-only
The ecliptic latitude for this object (``lon`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The distance for this object from the {0}.
(``representation`` must be None).
    pm_lon_coslat : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in the ecliptic longitude (including the ``cos(lat)``
factor) for this object (``pm_lat`` must also be given).
pm_lat : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in the ecliptic latitude for this object
(``pm_lon_coslat`` must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(
base_doc, components=doc_components_ecl.format("specified location"), footer=""
)
class BaseEclipticFrame(BaseCoordinateFrame):
"""
A base class for frames that have names and conventions like that of
ecliptic frames.
.. warning::
        In the current version of astropy, the ecliptic frames do not yet have
        stringent accuracy tests. We recommend that you test against "known-good"
        cases to ensure these frames are what you are looking for (and then,
        ideally, contribute those tests to Astropy!).
"""
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
doc_footer_geo = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth (necessary for transformation to
non-geocentric systems). Defaults to the 'J2000' equinox.
obstime : `~astropy.time.Time`, optional
The time at which the observation is taken. Used for determining the
position of the Earth. Defaults to J2000.
"""
@format_doc(
base_doc, components=doc_components_ecl.format("geocenter"), footer=doc_footer_geo
)
class GeocentricMeanEcliptic(BaseEclipticFrame):
"""
    Geocentric mean ecliptic coordinates. The origin of the coordinates is the
geocenter (Earth), with the x axis pointing to the *mean* (not true) equinox
at the time specified by the ``equinox`` attribute, and the xy-plane in the
plane of the ecliptic for that date.
Be aware that the definition of "geocentric" here means that this frame
*includes* light deflection from the sun, aberration, etc when transforming
to/from e.g. ICRS.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(
base_doc, components=doc_components_ecl.format("geocenter"), footer=doc_footer_geo
)
class GeocentricTrueEcliptic(BaseEclipticFrame):
"""
    Geocentric true ecliptic coordinates. The origin of the coordinates is the
geocenter (Earth), with the x axis pointing to the *true* (not mean) equinox
at the time specified by the ``equinox`` attribute, and the xy-plane in the
plane of the ecliptic for that date.
Be aware that the definition of "geocentric" here means that this frame
*includes* light deflection from the sun, aberration, etc when transforming
to/from e.g. ICRS.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
doc_footer_bary = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
Defaults to the 'J2000' equinox.
"""
@format_doc(
base_doc, components=doc_components_ecl.format("barycenter"), footer=doc_footer_bary
)
class BarycentricMeanEcliptic(BaseEclipticFrame):
"""
    Barycentric mean ecliptic coordinates. The origin of the coordinates is the
barycenter of the solar system, with the x axis pointing in the direction of
the *mean* (not true) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
@format_doc(
base_doc, components=doc_components_ecl.format("barycenter"), footer=doc_footer_bary
)
class BarycentricTrueEcliptic(BaseEclipticFrame):
"""
    Barycentric true ecliptic coordinates. The origin of the coordinates is the
barycenter of the solar system, with the x axis pointing in the direction of
the *true* (not mean) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
doc_footer_helio = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
Defaults to the 'J2000' equinox.
obstime : `~astropy.time.Time`, optional
The time at which the observation is taken. Used for determining the
position of the Sun. Defaults to J2000.
"""
@format_doc(
base_doc,
components=doc_components_ecl.format("sun's center"),
footer=doc_footer_helio,
)
class HeliocentricMeanEcliptic(BaseEclipticFrame):
"""
    Heliocentric mean ecliptic coordinates. The origin of the coordinates is the
center of the sun, with the x axis pointing in the direction of
the *mean* (not true) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(
base_doc,
components=doc_components_ecl.format("sun's center"),
footer=doc_footer_helio,
)
class HeliocentricTrueEcliptic(BaseEclipticFrame):
"""
    Heliocentric true ecliptic coordinates. The origin of the coordinates is the
center of the sun, with the x axis pointing in the direction of
the *true* (not mean) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"), footer="")
class HeliocentricEclipticIAU76(BaseEclipticFrame):
"""
    Heliocentric mean (IAU 1976) ecliptic coordinates. The origin of the coordinates is the
center of the sun, with the x axis pointing in the direction of
the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the
ecliptic of J2000 (according to the IAU 1976/1980 obliquity model).
It has, therefore, a fixed equinox and an older obliquity value
than the rest of the frames.
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("barycenter"), footer="")
class CustomBarycentricEcliptic(BaseEclipticFrame):
"""
Barycentric ecliptic coordinates with custom obliquity.
    The origin of the coordinates is the
    barycenter of the solar system, with the x axis pointing in the direction of
    the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the
    ecliptic tilted by a custom obliquity angle.
The frame attributes are listed under **Other Parameters**.
"""
obliquity = QuantityAttribute(default=84381.448 * u.arcsec, unit=u.arcsec)
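# Illustrative sketch (editorial addition): an ecliptic frame with a
# user-supplied obliquity, transformed back to ICRS. The obliquity value and
# coordinates below are arbitrary.
def _example_custom_obliquity():
    from astropy.coordinates import SkyCoord

    frame = CustomBarycentricEcliptic(obliquity=84000 * u.arcsec)
    coord = SkyCoord(lon=10 * u.deg, lat=5 * u.deg, distance=1 * u.au, frame=frame)
    return coord.transform_to("icrs")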
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.attributes import EarthLocationAttribute, TimeAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.coordinates.representation import (
CartesianDifferential,
CartesianRepresentation,
)
from astropy.utils.decorators import format_doc
from .utils import DEFAULT_OBSTIME, EARTH_CENTER
__all__ = ["ITRS"]
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth and its precession.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame. The default is the
centre of the Earth.
"""
@format_doc(base_doc, components="", footer=doc_footer)
class ITRS(BaseCoordinateFrame):
"""
A coordinate or frame in the International Terrestrial Reference System
(ITRS). This is approximately a geocentric system, although strictly it is
defined by a series of reference locations near the surface of the Earth (the ITRF).
For more background on the ITRS, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation.
This frame also includes frames that are defined *relative* to the center of the Earth,
but that are offset (in both position and velocity) from the center of the Earth. You
may see such non-geocentric coordinates referred to as "topocentric".
Topocentric ITRS frames are convenient for observations of near Earth objects where
stellar aberration is not included. One can merely subtract the observing site's
EarthLocation geocentric ITRS coordinates from the object's geocentric ITRS coordinates,
put the resulting vector into a topocentric ITRS frame and then transform to
`~astropy.coordinates.AltAz` or `~astropy.coordinates.HADec`. The other way around is
to transform an observed `~astropy.coordinates.AltAz` or `~astropy.coordinates.HADec`
position to a topocentric ITRS frame and add the observing site's EarthLocation geocentric
ITRS coordinates to yield the object's geocentric ITRS coordinates.
On the other hand, using ``transform_to`` to transform geocentric ITRS coordinates to
topocentric ITRS, observed `~astropy.coordinates.AltAz`, or observed
`~astropy.coordinates.HADec` coordinates includes the difference between stellar aberration
from the point of view of an observer at the geocenter and stellar aberration from the
point of view of an observer on the surface of the Earth. If the geocentric ITRS
coordinates of the object include stellar aberration at the geocenter (e.g. certain ILRS
ephemerides), then this is the way to go.
Note to ILRS ephemeris users: Astropy does not currently consider relativistic
    effects of the Earth's gravitational field. Nor do the `~astropy.coordinates.AltAz`
or `~astropy.coordinates.HADec` refraction corrections compute the change in the
range due to the curved path of light through the atmosphere, so Astropy is no
substitute for the ILRS software in these respects.
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
location = EarthLocationAttribute(default=EARTH_CENTER)
@property
def earth_location(self):
"""
The data in this frame as an `~astropy.coordinates.EarthLocation` class.
"""
from astropy.coordinates.earth import EarthLocation
cart = self.represent_as(CartesianRepresentation)
return EarthLocation(x=cart.x, y=cart.y, z=cart.z)
# Self-transform is in intermediate_rotation_transforms.py with all the other
# ITRS transforms
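# Illustrative sketch (editorial addition) of the topocentric workflow the
# class docstring describes: subtract the site's geocentric ITRS position,
# realize the difference in a topocentric ITRS frame, then transform to
# AltAz. All numbers are made up.
def _example_topocentric_itrs_to_altaz():
    from astropy import units as u
    from astropy.coordinates import AltAz, EarthLocation
    from astropy.time import Time

    t = Time("2020-01-01T00:00:00")
    site = EarthLocation(lon=0 * u.deg, lat=45 * u.deg, height=0 * u.m)
    # geocentric ITRS position of a hypothetical near-Earth object
    obj = ITRS(CartesianRepresentation([7000.0, 0.0, 0.0] * u.km), obstime=t)
    topo_repr = obj.cartesian - site.get_itrs(t).cartesian
    topo = ITRS(topo_repr, obstime=t, location=site)
    return topo.transform_to(AltAz(obstime=t, location=site))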
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to "observed" systems from ICRS.
"""
import erfa
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.builtin_frames.utils import atciqz, aticq
from astropy.coordinates.erfa_astrom import erfa_astrom
from astropy.coordinates.representation import (
CartesianRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from .altaz import AltAz
from .hadec import HADec
from .icrs import ICRS
from .utils import PIOVER2
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, AltAz)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, HADec)
def icrs_to_observed(icrs_coo, observed_frame):
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (
isinstance(icrs_coo.data, UnitSphericalRepresentation)
or icrs_coo.cartesian.x.unit == u.one
)
# first set up the astrometry context for ICRS<->observed
astrom = erfa_astrom.get().apco(observed_frame)
# correct for parallax to find BCRS direction from observer (as in erfa.pmpx)
if is_unitspherical:
srepr = icrs_coo.spherical
else:
observer_icrs = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
srepr = (icrs_coo.cartesian - observer_icrs).represent_as(
SphericalRepresentation
)
# convert to topocentric CIRS
cirs_ra, cirs_dec = atciqz(srepr, astrom)
# now perform observed conversion
if isinstance(observed_frame, AltAz):
lon, zen, _, _, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
lat = PIOVER2 - zen
else:
_, _, lon, lat, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
if is_unitspherical:
obs_srepr = UnitSphericalRepresentation(
lon << u.radian, lat << u.radian, copy=False
)
else:
obs_srepr = SphericalRepresentation(
lon << u.radian, lat << u.radian, srepr.distance, copy=False
)
return observed_frame.realize_frame(obs_srepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, ICRS)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HADec, ICRS)
def observed_to_icrs(observed_coo, icrs_frame):
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (
isinstance(observed_coo.data, UnitSphericalRepresentation)
or observed_coo.cartesian.x.unit == u.one
)
usrepr = observed_coo.represent_as(UnitSphericalRepresentation)
lon = usrepr.lon.to_value(u.radian)
lat = usrepr.lat.to_value(u.radian)
if isinstance(observed_coo, AltAz):
# the 'A' indicates zen/az inputs
coord_type = "A"
lat = PIOVER2 - lat
else:
coord_type = "H"
# first set up the astrometry context for ICRS<->CIRS at the observed_coo time
astrom = erfa_astrom.get().apco(observed_coo)
# Topocentric CIRS
cirs_ra, cirs_dec = erfa.atoiq(coord_type, lon, lat, astrom) << u.radian
if is_unitspherical:
srepr = SphericalRepresentation(cirs_ra, cirs_dec, 1, copy=False)
else:
srepr = SphericalRepresentation(
lon=cirs_ra, lat=cirs_dec, distance=observed_coo.distance, copy=False
)
# BCRS (Astrometric) direction to source
bcrs_ra, bcrs_dec = aticq(srepr, astrom) << u.radian
# Correct for parallax to get ICRS representation
if is_unitspherical:
icrs_srepr = UnitSphericalRepresentation(bcrs_ra, bcrs_dec, copy=False)
else:
icrs_srepr = SphericalRepresentation(
lon=bcrs_ra, lat=bcrs_dec, distance=observed_coo.distance, copy=False
)
observer_icrs = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newrepr = icrs_srepr.to_cartesian() + observer_icrs
icrs_srepr = newrepr.represent_as(SphericalRepresentation)
return icrs_frame.realize_frame(icrs_srepr)
# Create loopback transformations
frame_transform_graph._add_merged_transform(AltAz, ICRS, AltAz)
frame_transform_graph._add_merged_transform(HADec, ICRS, HADec)
# for now we just implement this through ICRS to make sure we get everything
# covered
# Before, this was using CIRS as intermediate frame, however this is much
# slower than the direct observed<->ICRS transform added in 4.3
# due to how the frame attribute broadcasting works, see
# https://github.com/astropy/astropy/pull/10994#issuecomment-722617041
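# Illustrative sketch (editorial addition): the direct ICRS<->observed path
# registered above is exercised whenever a coordinate is transformed to AltAz
# or HADec. The site and time below are arbitrary.
def _example_icrs_to_altaz():
    from astropy.coordinates import EarthLocation, SkyCoord
    from astropy.time import Time

    site = EarthLocation(lon=-70 * u.deg, lat=-30 * u.deg, height=2400 * u.m)
    t = Time("2021-01-01T03:00:00")
    coord = SkyCoord(ra=10 * u.deg, dec=-20 * u.deg)
    return coord.transform_to(AltAz(obstime=t, location=site))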
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions/values used repeatedly in different modules of
the ``builtin_frames`` package.
"""
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.representation import CartesianDifferential
from astropy.time import Time
from astropy.utils import iers
from astropy.utils.exceptions import AstropyWarning
# We use tt as the time scale for these equinoxes, primarily because it is the
# convention for J2000 (it is unclear if there is any "right answer" for B1950)
# while #8600 makes this the default behavior, we show it here to ensure it's
# clear which is used here
EQUINOX_J2000 = Time("J2000", scale="tt")
EQUINOX_B1950 = Time("B1950", scale="tt")
# This is a time object that is the default "obstime" when such an attribute is
# necessary. Currently, we use J2000.
DEFAULT_OBSTIME = Time("J2000", scale="tt")
# This is an EarthLocation that is the default "location" when such an attribute is
# necessary. It is the centre of the Earth.
EARTH_CENTER = EarthLocation(0 * u.km, 0 * u.km, 0 * u.km)
PIOVER2 = np.pi / 2.0
# comes from the mean of the 1962-2014 IERS B data
_DEFAULT_PM = (0.035, 0.29) * u.arcsec
def get_polar_motion(time):
"""
    Gets the two polar motion components in radians for use with apio.
"""
# Get the polar motion from the IERS table
iers_table = iers.earth_orientation_table.get()
xp, yp, status = iers_table.pm_xy(time, return_status=True)
wmsg = (
"Tried to get polar motions for times {} IERS data is "
"valid. Defaulting to polar motion from the 50-yr mean for those. "
"This may affect precision at the arcsec level. Please check your "
"astropy.utils.iers.conf.iers_auto_url and point it to a newer "
"version if necessary."
)
if np.any(status == iers.TIME_BEFORE_IERS_RANGE):
xp[status == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0]
yp[status == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg.format("before"), AstropyWarning)
if np.any(status == iers.TIME_BEYOND_IERS_RANGE):
xp[status == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
yp[status == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg.format("after"), AstropyWarning)
return xp.to_value(u.radian), yp.to_value(u.radian)
def _warn_iers(ierserr):
"""
    Generate a warning for an IERSRangeError.
Parameters
----------
ierserr : An `~astropy.utils.iers.IERSRangeError`
"""
msg = "{0} Assuming UT1-UTC=0 for coordinate transformations."
warnings.warn(msg.format(ierserr.args[0]), AstropyWarning)
def get_dut1utc(time):
"""
This function is used to get UT1-UTC in coordinates because normally it
gives an error outside the IERS range, but in coordinates we want to allow
it to go through but with a warning.
"""
try:
return time.delta_ut1_utc
except iers.IERSRangeError as e:
_warn_iers(e)
return np.zeros(time.shape)
def get_jd12(time, scale):
"""
Gets ``jd1`` and ``jd2`` from a time object in a particular scale.
Parameters
----------
time : `~astropy.time.Time`
The time to get the jds for
scale : str
The time scale to get the jds for
Returns
-------
jd1 : float
jd2 : float
"""
if time.scale == scale:
newtime = time
else:
try:
newtime = getattr(time, scale)
except iers.IERSRangeError as e:
_warn_iers(e)
newtime = time
return newtime.jd1, newtime.jd2
def norm(p):
"""
Normalise a p-vector.
"""
return p / np.sqrt(np.einsum("...i,...i", p, p))[..., np.newaxis]
def pav2pv(p, v):
"""
Combine p- and v- vectors into a pv-vector.
"""
pv = np.empty(np.broadcast(p, v).shape[:-1], erfa.dt_pv)
pv["p"] = p
pv["v"] = v
return pv
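# Illustrative sketch (editorial addition): packing a unit direction and a
# velocity into the structured pv-vector layout that ERFA routines expect.
# The values are arbitrary.
def _example_norm_and_pav2pv():
    p = np.array([1.0, 1.0, 0.0])
    v = np.array([0.0, 1.0e-2, 0.0])
    pv = pav2pv(norm(p), v)
    return pv["p"], pv["v"]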
def get_cip(jd1, jd2):
"""
Find the X, Y coordinates of the CIP and the CIO locator, s.
Parameters
----------
jd1 : float or `np.ndarray`
First part of two part Julian date (TDB)
jd2 : float or `np.ndarray`
Second part of two part Julian date (TDB)
Returns
-------
x : float or `np.ndarray`
x coordinate of the CIP
y : float or `np.ndarray`
y coordinate of the CIP
s : float or `np.ndarray`
CIO locator, s
"""
# classical NPB matrix, IAU 2006/2000A
rpnb = erfa.pnm06a(jd1, jd2)
# CIP X, Y coordinates from array
x, y = erfa.bpn2xy(rpnb)
# CIO locator, s
s = erfa.s06(jd1, jd2, x, y)
return x, y, s
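# Illustrative sketch (editorial addition): evaluating the CIP coordinates and
# the CIO locator for an arbitrary date, reusing get_jd12 from this module.
def _example_get_cip():
    jd1, jd2 = get_jd12(Time("2020-01-01"), "tdb")
    return get_cip(jd1, jd2)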
def aticq(srepr, astrom):
"""
A slightly modified version of the ERFA function ``eraAticq``.
``eraAticq`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
There are two issues with the version of aticq in ERFA. Both are associated
with the handling of light deflection.
The companion function ``eraAtciqz`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
In addition, ERFA's aticq assumes a distant source, so there is no difference between
the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
    few arcseconds in the worst case (e.g. a Venus transit).
Parameters
----------
srepr : `~astropy.coordinates.SphericalRepresentation`
Astrometric GCRS or CIRS position of object from observer
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
-------
rc : float or `~numpy.ndarray`
Right Ascension in radians
dc : float or `~numpy.ndarray`
Declination in radians
"""
# ignore parallax effects if no distance, or far away
srepr_distance = srepr.distance
ignore_distance = srepr_distance.unit == u.one
# RA, Dec to cartesian unit vectors
pos = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
# Bias-precession-nutation, giving GCRS proper direction.
ppr = erfa.trxp(astrom["bpn"], pos)
# Aberration, giving GCRS natural direction
d = np.zeros_like(ppr)
for j in range(2):
before = norm(ppr - d)
after = erfa.ab(before, astrom["v"], astrom["em"], astrom["bm1"])
d = after - before
pnat = norm(ppr - d)
# Light deflection by the Sun, giving BCRS coordinate direction
d = np.zeros_like(pnat)
for j in range(5):
before = norm(pnat - d)
if ignore_distance:
# No distance to object, assume a long way away
q = before
else:
# Find BCRS direction of Sun to object.
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively.
eh = astrom["em"][..., np.newaxis] * astrom["eh"]
# unit vector from Sun to object
q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * before
sundist, q = erfa.pn(q)
sundist = sundist[..., np.newaxis]
# calculation above is extremely unstable very close to the sun
# in these situations, default back to ldsun-style behaviour,
# since this is reversible and drops to zero within stellar limb
q = np.where(sundist > 1.0e-10, q, before)
after = erfa.ld(1.0, before, q, astrom["eh"], astrom["em"], 1e-6)
d = after - before
pco = norm(pnat - d)
# ICRS astrometric RA, Dec
rc, dc = erfa.c2s(pco)
return erfa.anp(rc), dc
def atciqz(srepr, astrom):
"""
A slightly modified version of the ERFA function ``eraAtciqz``.
``eraAtciqz`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
There are two issues with the version of atciqz in ERFA. Both are associated
with the handling of light deflection.
The companion function ``eraAticq`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
In addition, ERFA's atciqz assumes a distant source, so there is no difference between
the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
    few arcseconds in the worst case (e.g. a Venus transit).
Parameters
----------
srepr : `~astropy.coordinates.SphericalRepresentation`
Astrometric ICRS position of object from observer
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
-------
ri : float or `~numpy.ndarray`
Right Ascension in radians
di : float or `~numpy.ndarray`
Declination in radians
"""
# ignore parallax effects if no distance, or far away
srepr_distance = srepr.distance
ignore_distance = srepr_distance.unit == u.one
# BCRS coordinate direction (unit vector).
pco = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
# Find BCRS direction of Sun to object
if ignore_distance:
# No distance to object, assume a long way away
q = pco
else:
# Find BCRS direction of Sun to object.
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively.
eh = astrom["em"][..., np.newaxis] * astrom["eh"]
# unit vector from Sun to object
q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * pco
sundist, q = erfa.pn(q)
sundist = sundist[..., np.newaxis]
# calculation above is extremely unstable very close to the sun
# in these situations, default back to ldsun-style behaviour,
# since this is reversible and drops to zero within stellar limb
q = np.where(sundist > 1.0e-10, q, pco)
# Light deflection by the Sun, giving BCRS natural direction.
pnat = erfa.ld(1.0, pco, q, astrom["eh"], astrom["em"], 1e-6)
# Aberration, giving GCRS proper direction.
ppr = erfa.ab(pnat, astrom["v"], astrom["em"], astrom["bm1"])
# Bias-precession-nutation, giving CIRS proper direction.
# Has no effect if matrix is identity matrix, in which case gives GCRS ppr.
pi = erfa.rxp(astrom["bpn"], ppr)
# CIRS (GCRS) RA, Dec
ri, di = erfa.c2s(pi)
return erfa.anp(ri), di
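# Illustrative sketch (editorial addition): building an ERFA astrometry
# context with erfa.apco13 (site, weather, and polar-motion values below are
# arbitrary; zero pressure disables refraction) and round-tripping a direction
# through atciqz/aticq. Dimensionless distances make both routines skip the
# parallax correction.
def _example_atciqz_aticq_roundtrip():
    from astropy.coordinates.representation import SphericalRepresentation

    astrom, _eo = erfa.apco13(
        2459000.5, 0.0,  # UTC two-part Julian date
        0.0,  # UT1-UTC (s)
        0.0, 0.7, 100.0,  # longitude (rad), latitude (rad), height (m)
        0.0, 0.0,  # polar motion (rad)
        0.0, 0.0, 0.0, 0.5,  # pressure (hPa), temperature (C), humidity, wavelength (um)
    )
    srepr = SphericalRepresentation(10 * u.deg, 20 * u.deg, 1 * u.one)
    ri, di = atciqz(srepr, astrom)
    back = SphericalRepresentation(ri * u.rad, di * u.rad, 1 * u.one)
    return aticq(back, astrom)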
def prepare_earth_position_vel(time):
"""
Get barycentric position and velocity, and heliocentric position of Earth
Parameters
----------
time : `~astropy.time.Time`
time at which to calculate position and velocity of Earth
Returns
-------
earth_pv : `np.ndarray`
Barycentric position and velocity of Earth, in au and au/day
earth_helio : `np.ndarray`
Heliocentric position of Earth in au
"""
# this goes here to avoid circular import errors
from astropy.coordinates.solar_system import (
get_body_barycentric,
get_body_barycentric_posvel,
solar_system_ephemeris,
)
# get barycentric position and velocity of earth
ephemeris = solar_system_ephemeris.get()
# if we are using the builtin erfa based ephemeris,
# we can use the fact that epv00 already provides all we need.
# This avoids calling epv00 twice, once
# in get_body_barycentric_posvel('earth') and once in
# get_body_barycentric('sun')
if ephemeris == "builtin":
jd1, jd2 = get_jd12(time, "tdb")
earth_pv_heliocentric, earth_pv = erfa.epv00(jd1, jd2)
earth_heliocentric = earth_pv_heliocentric["p"]
# all other ephemeris providers probably don't have a shortcut like this
else:
earth_p, earth_v = get_body_barycentric_posvel("earth", time)
# get heliocentric position of earth, preparing it for passing to erfa.
sun = get_body_barycentric("sun", time)
earth_heliocentric = (earth_p - sun).get_xyz(xyz_axis=-1).to_value(u.au)
# Also prepare earth_pv for passing to erfa, which wants it as
# a structured dtype.
earth_pv = pav2pv(
earth_p.get_xyz(xyz_axis=-1).to_value(u.au),
earth_v.get_xyz(xyz_axis=-1).to_value(u.au / u.d),
)
return earth_pv, earth_heliocentric
def get_offset_sun_from_barycenter(time, include_velocity=False, reverse=False):
"""
Returns the offset of the Sun center from the solar-system barycenter (SSB).
Parameters
----------
time : `~astropy.time.Time`
Time at which to calculate the offset
include_velocity : `bool`
If ``True``, attach the velocity as a differential. Defaults to ``False``.
reverse : `bool`
If ``True``, return the offset of the barycenter from the Sun. Defaults to ``False``.
Returns
-------
`~astropy.coordinates.CartesianRepresentation`
The offset
"""
if include_velocity:
# Import here to avoid a circular import
from astropy.coordinates.solar_system import get_body_barycentric_posvel
offset_pos, offset_vel = get_body_barycentric_posvel("sun", time)
if reverse:
offset_pos, offset_vel = -offset_pos, -offset_vel
offset_vel = offset_vel.represent_as(CartesianDifferential)
offset_pos = offset_pos.with_differentials(offset_vel)
else:
# Import here to avoid a circular import
from astropy.coordinates.solar_system import get_body_barycentric
offset_pos = get_body_barycentric("sun", time)
if reverse:
offset_pos = -offset_pos
return offset_pos
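# Illustrative sketch (editorial addition): the Sun's offset from the SSB with
# its velocity attached as a differential, for an arbitrary time.
def _example_sun_offset():
    offset = get_offset_sun_from_barycenter(Time("J2000"), include_velocity=True)
    return offset, offset.differentials["s"]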
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to/from ecliptic systems.
"""
import erfa
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.errors import UnitsError
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import (
AffineTransform,
DynamicMatrixTransform,
FunctionTransformWithFiniteDifference,
)
from .ecliptic import (
BarycentricMeanEcliptic,
BarycentricTrueEcliptic,
CustomBarycentricEcliptic,
GeocentricMeanEcliptic,
GeocentricTrueEcliptic,
HeliocentricEclipticIAU76,
HeliocentricMeanEcliptic,
HeliocentricTrueEcliptic,
)
from .gcrs import GCRS
from .icrs import ICRS
from .utils import EQUINOX_J2000, get_jd12, get_offset_sun_from_barycenter
def _mean_ecliptic_rotation_matrix(equinox):
# This code just calls ecm06, which uses the precession matrix according to the
# IAU 2006 model, but leaves out nutation. This brings the results closer to what
# other libraries give (see https://github.com/astropy/astropy/pull/6508).
return erfa.ecm06(*get_jd12(equinox, "tt"))
def _true_ecliptic_rotation_matrix(equinox):
# This code calls the same routines as done in pnm06a from ERFA, which
# retrieves the precession matrix (including frame bias) according to
# the IAU 2006 model, and including the nutation.
# This family of systems is less popular
# (see https://github.com/astropy/astropy/pull/6508).
jd1, jd2 = get_jd12(equinox, "tt")
# Here, we call the three routines from erfa.pnm06a separately,
# so that we can keep the nutation for calculating the true obliquity
# (which is a fairly expensive operation); see gh-11000.
# pnm06a: Fukushima-Williams angles for frame bias and precession.
# (ERFA names short for F-W's gamma_bar, phi_bar, psi_bar and epsilon_A).
gamb, phib, psib, epsa = erfa.pfw06(jd1, jd2)
# pnm06a: Nutation components (in longitude and obliquity).
dpsi, deps = erfa.nut06a(jd1, jd2)
# pnm06a: Equinox based nutation x precession x bias matrix.
rnpb = erfa.fw2m(gamb, phib, psib + dpsi, epsa + deps)
# calculate the true obliquity of the ecliptic
obl = erfa.obl06(jd1, jd2) + deps
return rotation_matrix(obl << u.radian, "x") @ rnpb
def _obliquity_only_rotation_matrix(
obl=erfa.obl80(EQUINOX_J2000.jd1, EQUINOX_J2000.jd2) * u.radian
):
# This code only accounts for the obliquity,
# which can be passed explicitly.
# The default value is the IAU 1980 value for J2000,
# which is computed using obl80 from ERFA:
#
# obl = erfa.obl80(EQUINOX_J2000.jd1, EQUINOX_J2000.jd2) * u.radian
return rotation_matrix(obl, "x")
# MeanEcliptic frames
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference,
GCRS,
GeocentricMeanEcliptic,
finite_difference_frameattr_name="equinox",
)
def gcrs_to_geoecliptic(gcrs_coo, to_frame):
# first get us to a 0 pos/vel GCRS at the target equinox
gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=to_frame.obstime))
rmat = _mean_ecliptic_rotation_matrix(to_frame.equinox)
newrepr = gcrs_coo2.cartesian.transform(rmat)
return to_frame.realize_frame(newrepr)
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference, GeocentricMeanEcliptic, GCRS
)
def geoecliptic_to_gcrs(from_coo, gcrs_frame):
rmat = _mean_ecliptic_rotation_matrix(from_coo.equinox)
newrepr = from_coo.cartesian.transform(matrix_transpose(rmat))
gcrs = GCRS(newrepr, obstime=from_coo.obstime)
# now do any needed offsets (no-op if same obstime and 0 pos/vel)
return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, BarycentricMeanEcliptic)
def icrs_to_baryecliptic(from_coo, to_frame):
return _mean_ecliptic_rotation_matrix(to_frame.equinox)
@frame_transform_graph.transform(DynamicMatrixTransform, BarycentricMeanEcliptic, ICRS)
def baryecliptic_to_icrs(from_coo, to_frame):
return matrix_transpose(icrs_to_baryecliptic(to_frame, from_coo))
_NEED_ORIGIN_HINT = (
"The input {0} coordinates do not have length units. This probably means you"
" created coordinates with lat/lon but no distance. Heliocentric<->ICRS transforms"
" cannot function in this case because there is an origin shift."
)
@frame_transform_graph.transform(AffineTransform, ICRS, HeliocentricMeanEcliptic)
def icrs_to_helioecliptic(from_coo, to_frame):
if not u.m.is_equivalent(from_coo.cartesian.x.unit):
raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
# get the offset of the barycenter from the Sun
ssb_from_sun = get_offset_sun_from_barycenter(
to_frame.obstime,
reverse=True,
include_velocity=bool(from_coo.data.differentials),
)
# now compute the matrix to precess to the right orientation
rmat = _mean_ecliptic_rotation_matrix(to_frame.equinox)
return rmat, ssb_from_sun.transform(rmat)
@frame_transform_graph.transform(AffineTransform, HeliocentricMeanEcliptic, ICRS)
def helioecliptic_to_icrs(from_coo, to_frame):
if not u.m.is_equivalent(from_coo.cartesian.x.unit):
raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
# first un-precess from ecliptic to ICRS orientation
rmat = _mean_ecliptic_rotation_matrix(from_coo.equinox)
# now offset back to barycentric, which is the correct center for ICRS
sun_from_ssb = get_offset_sun_from_barycenter(
from_coo.obstime, include_velocity=bool(from_coo.data.differentials)
)
return matrix_transpose(rmat), sun_from_ssb
# TrueEcliptic frames
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference,
GCRS,
GeocentricTrueEcliptic,
finite_difference_frameattr_name="equinox",
)
def gcrs_to_true_geoecliptic(gcrs_coo, to_frame):
# first get us to a 0 pos/vel GCRS at the target equinox
gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=to_frame.obstime))
rmat = _true_ecliptic_rotation_matrix(to_frame.equinox)
newrepr = gcrs_coo2.cartesian.transform(rmat)
return to_frame.realize_frame(newrepr)
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference, GeocentricTrueEcliptic, GCRS
)
def true_geoecliptic_to_gcrs(from_coo, gcrs_frame):
rmat = _true_ecliptic_rotation_matrix(from_coo.equinox)
newrepr = from_coo.cartesian.transform(matrix_transpose(rmat))
gcrs = GCRS(newrepr, obstime=from_coo.obstime)
# now do any needed offsets (no-op if same obstime and 0 pos/vel)
return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, BarycentricTrueEcliptic)
def icrs_to_true_baryecliptic(from_coo, to_frame):
return _true_ecliptic_rotation_matrix(to_frame.equinox)
@frame_transform_graph.transform(DynamicMatrixTransform, BarycentricTrueEcliptic, ICRS)
def true_baryecliptic_to_icrs(from_coo, to_frame):
return matrix_transpose(icrs_to_true_baryecliptic(to_frame, from_coo))
@frame_transform_graph.transform(AffineTransform, ICRS, HeliocentricTrueEcliptic)
def icrs_to_true_helioecliptic(from_coo, to_frame):
if not u.m.is_equivalent(from_coo.cartesian.x.unit):
raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
# get the offset of the barycenter from the Sun
ssb_from_sun = get_offset_sun_from_barycenter(
to_frame.obstime,
reverse=True,
include_velocity=bool(from_coo.data.differentials),
)
# now compute the matrix to precess to the right orientation
rmat = _true_ecliptic_rotation_matrix(to_frame.equinox)
return rmat, ssb_from_sun.transform(rmat)
@frame_transform_graph.transform(AffineTransform, HeliocentricTrueEcliptic, ICRS)
def true_helioecliptic_to_icrs(from_coo, to_frame):
if not u.m.is_equivalent(from_coo.cartesian.x.unit):
raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
# first un-precess from ecliptic to ICRS orientation
rmat = _true_ecliptic_rotation_matrix(from_coo.equinox)
# now offset back to barycentric, which is the correct center for ICRS
sun_from_ssb = get_offset_sun_from_barycenter(
from_coo.obstime, include_velocity=bool(from_coo.data.differentials)
)
return matrix_transpose(rmat), sun_from_ssb
# Other ecliptic frames
@frame_transform_graph.transform(AffineTransform, HeliocentricEclipticIAU76, ICRS)
def ecliptic_to_iau76_icrs(from_coo, to_frame):
# first un-precess from ecliptic to ICRS orientation
rmat = _obliquity_only_rotation_matrix()
# now offset back to barycentric, which is the correct center for ICRS
sun_from_ssb = get_offset_sun_from_barycenter(
from_coo.obstime, include_velocity=bool(from_coo.data.differentials)
)
return matrix_transpose(rmat), sun_from_ssb
@frame_transform_graph.transform(AffineTransform, ICRS, HeliocentricEclipticIAU76)
def icrs_to_iau76_ecliptic(from_coo, to_frame):
# get the offset of the barycenter from the Sun
ssb_from_sun = get_offset_sun_from_barycenter(
to_frame.obstime,
reverse=True,
include_velocity=bool(from_coo.data.differentials),
)
# now compute the matrix to precess to the right orientation
rmat = _obliquity_only_rotation_matrix()
return rmat, ssb_from_sun.transform(rmat)
@frame_transform_graph.transform(
DynamicMatrixTransform, ICRS, CustomBarycentricEcliptic
)
def icrs_to_custombaryecliptic(from_coo, to_frame):
return _obliquity_only_rotation_matrix(to_frame.obliquity)
@frame_transform_graph.transform(
DynamicMatrixTransform, CustomBarycentricEcliptic, ICRS
)
def custombaryecliptic_to_icrs(from_coo, to_frame):
    return matrix_transpose(icrs_to_custombaryecliptic(to_frame, from_coo))
# Create loopback transformations
frame_transform_graph._add_merged_transform(
GeocentricMeanEcliptic, ICRS, GeocentricMeanEcliptic
)
frame_transform_graph._add_merged_transform(
GeocentricTrueEcliptic, ICRS, GeocentricTrueEcliptic
)
frame_transform_graph._add_merged_transform(
HeliocentricMeanEcliptic, ICRS, HeliocentricMeanEcliptic
)
frame_transform_graph._add_merged_transform(
HeliocentricTrueEcliptic, ICRS, HeliocentricTrueEcliptic
)
frame_transform_graph._add_merged_transform(
HeliocentricEclipticIAU76, ICRS, HeliocentricEclipticIAU76
)
frame_transform_graph._add_merged_transform(
BarycentricMeanEcliptic, ICRS, BarycentricMeanEcliptic
)
frame_transform_graph._add_merged_transform(
BarycentricTrueEcliptic, ICRS, BarycentricTrueEcliptic
)
frame_transform_graph._add_merged_transform(
CustomBarycentricEcliptic, ICRS, CustomBarycentricEcliptic
)
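# Illustrative sketch (editorial addition): with the transforms above
# registered, a SkyCoord can be moved from ICRS into any of the ecliptic
# frames. A distance is required for the heliocentric case because of the
# origin shift; the values below are arbitrary.
def _example_icrs_to_helioecliptic():
    from astropy.coordinates import SkyCoord

    coord = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.au, frame="icrs")
    return coord.transform_to(HeliocentricMeanEcliptic(obstime="J2010"))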
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting from ICRS/HCRS to CIRS and
anything in between (currently that means GCRS)
"""
import numpy as np
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.erfa_astrom import erfa_astrom
from astropy.coordinates.representation import (
CartesianRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.transformations import (
AffineTransform,
FunctionTransformWithFiniteDifference,
)
from .cirs import CIRS
from .gcrs import GCRS
from .hcrs import HCRS
from .icrs import ICRS
from .utils import atciqz, aticq, get_offset_sun_from_barycenter
# First the ICRS/CIRS related transforms
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, CIRS)
def icrs_to_cirs(icrs_coo, cirs_frame):
# first set up the astrometry context for ICRS<->CIRS
astrom = erfa_astrom.get().apco(cirs_frame)
if (
icrs_coo.data.get_name() == "unitspherical"
or icrs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just do the infinite-distance/no parallax calculation
srepr = icrs_coo.spherical
cirs_ra, cirs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = UnitSphericalRepresentation(
lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
copy=False,
)
else:
# When there is a distance, we first offset for parallax to get the
# astrometric coordinate direction and *then* run the ERFA transform for
# no parallax/PM. This ensures reversibility and is more sensible for
# inside solar system objects
astrom_eb = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
cirs_ra, cirs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = SphericalRepresentation(
lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False,
)
return cirs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ICRS)
def cirs_to_icrs(cirs_coo, icrs_frame):
# set up the astrometry context for ICRS<->cirs and then convert to
# astrometric coordinate direction
astrom = erfa_astrom.get().apco(cirs_coo)
srepr = cirs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
if (
cirs_coo.data.get_name() == "unitspherical"
or cirs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(
lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False,
)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_cirs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(
lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False,
)
astrom_eb = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
# Now the GCRS-related transforms to/from ICRS
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, GCRS)
def icrs_to_gcrs(icrs_coo, gcrs_frame):
# first set up the astrometry context for ICRS<->GCRS.
astrom = erfa_astrom.get().apcs(gcrs_frame)
if (
icrs_coo.data.get_name() == "unitspherical"
or icrs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just do the infinite-distance/no parallax calculation
srepr = icrs_coo.represent_as(SphericalRepresentation)
gcrs_ra, gcrs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = UnitSphericalRepresentation(
lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
copy=False,
)
else:
# When there is a distance, we first offset for parallax to get the
# BCRS coordinate direction and *then* run the ERFA transform for no
# parallax/PM. This ensures reversibility and is more sensible for
# inside solar system objects
astrom_eb = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
gcrs_ra, gcrs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = SphericalRepresentation(
lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False,
)
return gcrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, ICRS)
def gcrs_to_icrs(gcrs_coo, icrs_frame):
# set up the astrometry context for ICRS<->GCRS and then convert to BCRS
# coordinate direction
astrom = erfa_astrom.get().apcs(gcrs_coo)
srepr = gcrs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
if (
gcrs_coo.data.get_name() == "unitspherical"
or gcrs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(
lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False,
)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_gcrs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(
lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False,
)
astrom_eb = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, HCRS)
def gcrs_to_hcrs(gcrs_coo, hcrs_frame):
if np.any(gcrs_coo.obstime != hcrs_frame.obstime):
        # if the GCRS obstime and HCRS obstime are not the same, we first
# have to move to a GCRS where they are.
frameattrs = gcrs_coo.get_frame_attr_defaults()
frameattrs["obstime"] = hcrs_frame.obstime
gcrs_coo = gcrs_coo.transform_to(GCRS(**frameattrs))
# set up the astrometry context for ICRS<->GCRS and then convert to ICRS
# coordinate direction
astrom = erfa_astrom.get().apcs(gcrs_coo)
srepr = gcrs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
# convert to Quantity objects
i_ra = u.Quantity(i_ra, u.radian, copy=False)
i_dec = u.Quantity(i_dec, u.radian, copy=False)
if (
gcrs_coo.data.get_name() == "unitspherical"
or gcrs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=i_dec, lon=i_ra, copy=False)
else:
# When there is a distance, apply the parallax/offset to the
# Heliocentre as the last step to ensure round-tripping with the
# hcrs_to_gcrs transform
# Note that the distance in intermedrep is *not* a real distance as it
# does not include the offset back to the Heliocentre
intermedrep = SphericalRepresentation(
lat=i_dec, lon=i_ra, distance=srepr.distance, copy=False
)
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively. Shapes are (X) and (X,3), where (X) is the
# shape resulting from broadcasting the shape of the times object
# against the shape of the pv array.
# broadcast em to eh and scale eh
eh = astrom["eh"] * astrom["em"][..., np.newaxis]
eh = CartesianRepresentation(eh, unit=u.au, xyz_axis=-1, copy=False)
newrep = intermedrep.to_cartesian() + eh
return hcrs_frame.realize_frame(newrep)
_NEED_ORIGIN_HINT = (
"The input {0} coordinates do not have length units. This probably means you"
" created coordinates with lat/lon but no distance. Heliocentric<->ICRS transforms"
" cannot function in this case because there is an origin shift."
)
@frame_transform_graph.transform(AffineTransform, HCRS, ICRS)
def hcrs_to_icrs(hcrs_coo, icrs_frame):
# this is just an origin translation so without a distance it cannot go ahead
if isinstance(hcrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(hcrs_coo.__class__.__name__))
return None, get_offset_sun_from_barycenter(
hcrs_coo.obstime, include_velocity=bool(hcrs_coo.data.differentials)
)
@frame_transform_graph.transform(AffineTransform, ICRS, HCRS)
def icrs_to_hcrs(icrs_coo, hcrs_frame):
# this is just an origin translation so without a distance it cannot go ahead
if isinstance(icrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__))
return None, get_offset_sun_from_barycenter(
hcrs_frame.obstime,
reverse=True,
include_velocity=bool(icrs_coo.data.differentials),
)
# Create loopback transformations
frame_transform_graph._add_merged_transform(CIRS, ICRS, CIRS)
# The CIRS<->CIRS transform going through ICRS has a
# subtle implication that a point in CIRS is uniquely determined
# by the corresponding astrometric ICRS coordinate *at its
# current time*. This has some subtle implications in terms of GR, but
# is sort of glossed over in the current scheme because we are dropping
# distances anyway.
frame_transform_graph._add_merged_transform(GCRS, ICRS, GCRS)
frame_transform_graph._add_merged_transform(HCRS, ICRS, HCRS)
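# Illustrative sketch (editorial addition): a round trip ICRS -> CIRS -> ICRS,
# which the parallax handling above is designed to make reversible. The
# values are arbitrary.
def _example_icrs_cirs_roundtrip():
    from astropy.coordinates import SkyCoord
    from astropy.time import Time

    coord = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, distance=1.5 * u.au)
    cirs = coord.transform_to(CIRS(obstime=Time("2020-06-01")))
    return cirs.transform_to(ICRS())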
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import (
DynamicMatrixTransform,
FunctionTransform,
)
_skyoffset_cache = {}
def make_skyoffset_cls(framecls):
"""
Create a new class that is the sky offset frame for a specific class of
origin frame. If such a class has already been created for this frame, the
same class will be returned.
The new class will always have component names for spherical coordinates of
``lon``/``lat``.
Parameters
----------
framecls : `~astropy.coordinates.BaseCoordinateFrame` subclass
The class to create the SkyOffsetFrame of.
Returns
-------
skyoffsetframecls : class
The class for the new skyoffset frame.
Notes
-----
This function is necessary because Astropy's frame transformations depend
on connection between specific frame *classes*. So each type of frame
needs its own distinct skyoffset frame class. This function generates
just that class, as well as ensuring that only one example of such a class
actually gets created in any given python session.
"""
if framecls in _skyoffset_cache:
return _skyoffset_cache[framecls]
# Create a new SkyOffsetFrame subclass for this frame class.
name = "SkyOffset" + framecls.__name__
_SkyOffsetFramecls = type(
name,
(SkyOffsetFrame, framecls),
{
"origin": CoordinateAttribute(frame=framecls, default=None),
# The following two have to be done because otherwise we use the
# defaults of SkyOffsetFrame set by BaseCoordinateFrame.
"_default_representation": framecls._default_representation,
"_default_differential": framecls._default_differential,
"__doc__": SkyOffsetFrame.__doc__,
},
)
@frame_transform_graph.transform(
FunctionTransform, _SkyOffsetFramecls, _SkyOffsetFramecls
)
def skyoffset_to_skyoffset(from_skyoffset_coord, to_skyoffset_frame):
"""Transform between two skyoffset frames."""
# This transform goes through the parent frames on each side.
# from_frame -> from_frame.origin -> to_frame.origin -> to_frame
tmp_from = from_skyoffset_coord.transform_to(from_skyoffset_coord.origin)
tmp_to = tmp_from.transform_to(to_skyoffset_frame.origin)
return tmp_to.transform_to(to_skyoffset_frame)
@frame_transform_graph.transform(
DynamicMatrixTransform, framecls, _SkyOffsetFramecls
)
def reference_to_skyoffset(reference_frame, skyoffset_frame):
"""Convert a reference coordinate to an sky offset frame."""
# Define rotation matrices along the position angle vector, and
# relative to the origin.
origin = skyoffset_frame.origin.spherical
return (
rotation_matrix(-skyoffset_frame.rotation, "x")
@ rotation_matrix(-origin.lat, "y")
@ rotation_matrix(origin.lon, "z")
)
@frame_transform_graph.transform(
DynamicMatrixTransform, _SkyOffsetFramecls, framecls
)
def skyoffset_to_reference(skyoffset_coord, reference_frame):
"""Convert an sky offset frame coordinate to the reference frame"""
# use the forward transform, but just invert it
R = reference_to_skyoffset(reference_frame, skyoffset_coord)
# transpose is the inverse because R is a rotation matrix
return matrix_transpose(R)
_skyoffset_cache[framecls] = _SkyOffsetFramecls
return _SkyOffsetFramecls
class SkyOffsetFrame(BaseCoordinateFrame):
"""
A frame which is relative to some specific position and oriented to match
the frame of that position.
SkyOffsetFrames always have component names for spherical coordinates
of ``lon``/``lat``, *not* the component names for the frame of ``origin``.
This is useful for calculating offsets and dithers in the frame of the sky
relative to an arbitrary position. Coordinates in this frame are both centered on the position specified by the
``origin`` coordinate, *and* they are oriented in the same manner as the
``origin`` frame. E.g., if ``origin`` is `~astropy.coordinates.ICRS`, this
object's ``lat`` will be pointed in the direction of Dec, while ``lon``
will point in the direction of RA.
For more on skyoffset frames, see :ref:`astropy:astropy-skyoffset-frames`.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
origin : coordinate-like
The coordinate which specifies the origin of this frame. Note that this
origin is used purely for on-sky location/rotation. It can have a
``distance`` but it will not be used by this ``SkyOffsetFrame``.
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Notes
-----
``SkyOffsetFrame`` is a factory class. That is, the objects that it
yields are *not* actually objects of class ``SkyOffsetFrame``. Instead,
distinct classes are created on-the-fly for whatever the frame class is
of ``origin``.
"""
rotation = QuantityAttribute(default=0, unit=u.deg)
origin = CoordinateAttribute(default=None, frame=None)
def __new__(cls, *args, **kwargs):
# We don't want to call this method if we've already set up
# a skyoffset frame for this class.
if not (issubclass(cls, SkyOffsetFrame) and cls is not SkyOffsetFrame):
# We get the origin argument, and handle it here.
try:
origin_frame = kwargs["origin"]
except KeyError:
raise TypeError(
"Can't initialize a SkyOffsetFrame without origin= keyword."
)
if hasattr(origin_frame, "frame"):
origin_frame = origin_frame.frame
newcls = make_skyoffset_cls(origin_frame.__class__)
return newcls.__new__(newcls, *args, **kwargs)
# http://stackoverflow.com/questions/19277399/why-does-object-new-work-differently-in-these-three-cases
# See above for why this is necessary. Basically, because some child
# may override __new__, we must override it here to never pass
# arguments to the object.__new__ method.
if super().__new__ is object.__new__:
return super().__new__(cls)
return super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.origin is not None and not self.origin.has_data:
raise ValueError("The origin supplied to SkyOffsetFrame has no data.")
if self.has_data:
self._set_skyoffset_data_lon_wrap_angle(self.data)
@staticmethod
def _set_skyoffset_data_lon_wrap_angle(data):
if hasattr(data, "lon"):
data.lon.wrap_angle = 180.0 * u.deg
return data
def represent_as(self, base, s="base", in_frame_units=False):
"""
Ensure the wrap angle is set for any spherical
representations.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_skyoffset_data_lon_wrap_angle(data)
return data
def __reduce__(self):
return (_skyoffset_reducer, (self.origin,), self.__dict__)
def _skyoffset_reducer(origin):
return SkyOffsetFrame.__new__(SkyOffsetFrame, origin=origin)
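# A short usage sketch (illustrative, not part of the original module; the
# coordinates are arbitrary example values): offsets of a target about a
# chosen origin come out as small lon/lat angles.
if __name__ == "__main__":
    from astropy.coordinates import ICRS

    center = ICRS(ra=10.0 * u.deg, dec=45.0 * u.deg)
    target = ICRS(ra=11.0 * u.deg, dec=46.0 * u.deg)
    offset = target.transform_to(SkyOffsetFrame(origin=center))
    # lon/lat are the on-sky offsets from ``center`` in the ICRS orientation
    print(offset.lon.to(u.arcmin), offset.lat.to(u.arcmin))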
|
fb58675fac7b6c7ff14d29ed1ec678ec172090c3dae0809b3e0630702e33632b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to "observed" systems from CIRS.
"""
import erfa
import numpy as np
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.erfa_astrom import erfa_astrom
from astropy.coordinates.representation import (
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from .altaz import AltAz
from .cirs import CIRS
from .hadec import HADec
from .utils import PIOVER2
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, AltAz)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, HADec)
def cirs_to_observed(cirs_coo, observed_frame):
if np.any(observed_frame.location != cirs_coo.location) or np.any(
cirs_coo.obstime != observed_frame.obstime
):
cirs_coo = cirs_coo.transform_to(
CIRS(obstime=observed_frame.obstime, location=observed_frame.location)
)
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (
isinstance(cirs_coo.data, UnitSphericalRepresentation)
or cirs_coo.cartesian.x.unit == u.one
)
# We used to do "astrometric" corrections here, but these are no longer necesssary
# CIRS has proper topocentric behaviour
usrepr = cirs_coo.represent_as(UnitSphericalRepresentation)
cirs_ra = usrepr.lon.to_value(u.radian)
cirs_dec = usrepr.lat.to_value(u.radian)
# first set up the astrometry context for CIRS<->observed
astrom = erfa_astrom.get().apio(observed_frame)
if isinstance(observed_frame, AltAz):
lon, zen, _, _, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
lat = PIOVER2 - zen
else:
_, _, lon, lat, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
if is_unitspherical:
rep = UnitSphericalRepresentation(
lat=u.Quantity(lat, u.radian, copy=False),
lon=u.Quantity(lon, u.radian, copy=False),
copy=False,
)
else:
# since we've transformed to CIRS at the observatory location, just use CIRS distance
rep = SphericalRepresentation(
lat=u.Quantity(lat, u.radian, copy=False),
lon=u.Quantity(lon, u.radian, copy=False),
distance=cirs_coo.distance,
copy=False,
)
return observed_frame.realize_frame(rep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, CIRS)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HADec, CIRS)
def observed_to_cirs(observed_coo, cirs_frame):
usrepr = observed_coo.represent_as(UnitSphericalRepresentation)
lon = usrepr.lon.to_value(u.radian)
lat = usrepr.lat.to_value(u.radian)
if isinstance(observed_coo, AltAz):
# the 'A' indicates zen/az inputs
coord_type = "A"
lat = PIOVER2 - lat
else:
coord_type = "H"
# first set up the astrometry context for CIRS<->observed at the observed_coo time
astrom = erfa_astrom.get().apio(observed_coo)
cirs_ra, cirs_dec = erfa.atoiq(coord_type, lon, lat, astrom) << u.radian
if (
isinstance(observed_coo.data, UnitSphericalRepresentation)
or observed_coo.cartesian.x.unit == u.one
):
distance = None
else:
distance = observed_coo.distance
cirs_at_aa_time = CIRS(
ra=cirs_ra,
dec=cirs_dec,
distance=distance,
obstime=observed_coo.obstime,
location=observed_coo.location,
)
# this final transform may be a no-op if the obstimes and locations are the same
return cirs_at_aa_time.transform_to(cirs_frame)
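# An illustrative round trip (a sketch, not part of the original module; the
# location, time, and coordinates are arbitrary example values):
# CIRS -> AltAz -> CIRS for a ground-based observer, using the transforms above.
if __name__ == "__main__":
    from astropy.coordinates import EarthLocation
    from astropy.time import Time

    loc = EarthLocation(lat=-30.0 * u.deg, lon=-70.0 * u.deg, height=2500 * u.m)
    t = Time("2020-01-01T03:00:00")
    cirs = CIRS(ra=150 * u.deg, dec=-20 * u.deg, obstime=t, location=loc)
    aa = cirs.transform_to(AltAz(obstime=t, location=loc))
    back = aa.transform_to(CIRS(obstime=t, location=loc))
    print(aa.alt, back.ra - cirs.ra)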
|
dab01bf5f07cc6f7413f397863315572c47c49f15d7736579aff4b0445c080ed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import (
EarthLocationAttribute,
QuantityAttribute,
TimeAttribute,
)
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping,
base_doc,
)
from astropy.utils.decorators import format_doc
__all__ = ["AltAz"]
_90DEG = 90 * u.deg
doc_components = """
az : `~astropy.coordinates.Angle`, optional, keyword-only
The Azimuth for this object (``alt`` must also be given and
``representation`` must be None).
alt : `~astropy.coordinates.Angle`, optional, keyword-only
The Altitude for this object (``az`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_az_cosalt : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in azimuth (including the ``cos(alt)`` factor) for
this object (``pm_alt`` must also be given).
pm_alt : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in altitude for this object (``pm_az_cosalt`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity` ['pressure']
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity` ['temperature']
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
relative_humidity : `~astropy.units.Quantity` ['dimensionless'] or number
The relative humidity as a dimensionless quantity between 0 and 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity` ['length']
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
transforming to AltAz and back to another frame can give highly discrepant
results. For much better numerical stability, leave the ``pressure`` at
``0`` (the default), thereby disabling the refraction correction and
yielding "topocentric" horizontal coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class AltAz(BaseCoordinateFrame):
"""
A coordinate or frame in the Altitude-Azimuth system (Horizontal
coordinates) with respect to the WGS84 ellipsoid. Azimuth is oriented
East of North (i.e., N=0, E=90 degrees). Altitude is also known as
elevation angle, so this frame is sometimes called the Azimuth-Elevation system.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from AltAz to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "az"),
RepresentationMapping("lat", "alt"),
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1 * u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def secz(self):
"""
Secant of the zenith angle for this coordinate, a common estimate of
the airmass.
"""
return 1 / np.sin(self.alt)
@property
def zen(self):
"""
The zenith angle (or zenith distance / co-altitude) for this coordinate.
"""
return _90DEG.to(self.alt.unit) - self.alt
# self-transform defined in icrs_observed_transforms.py
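# A small usage sketch (illustrative, not part of the original module)
# showing the derived ``zen`` and ``secz`` properties.
if __name__ == "__main__":
    aa = AltAz(az=120 * u.deg, alt=30 * u.deg)
    print(aa.zen)   # 60 deg: the zenith angle is 90 deg minus the altitude
    print(aa.secz)  # 2.0: sec(z) = 1 / sin(alt)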
|
6255ca9b34b18cdaaf85017757b0945a669fc77261828401f83a073bde045ccf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import (
EarthLocationAttribute,
QuantityAttribute,
TimeAttribute,
)
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping,
base_doc,
)
from astropy.utils.decorators import format_doc
__all__ = ["HADec"]
doc_components = """
ha : `~astropy.coordinates.Angle`, optional, keyword-only
The Hour Angle for this object (``dec`` must also be given and
``representation`` must be None).
dec : `~astropy.coordinates.Angle`, optional, keyword-only
The Declination for this object (``ha`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_ha_cosdec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in hour angle (including the ``cos(dec)`` factor) for
this object (``pm_dec`` must also be given).
pm_dec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in declination for this object (``pm_ha_cosdec`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity` ['pressure']
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity` ['temperature']
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
relative_humidity : `~astropy.units.Quantity` ['dimensionless'] or number
The relative humidity as a dimensionless quantity between 0 and 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity` ['length']
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
transforming to HADec and back to another frame can give highly discrepant
results. For much better numerical stability, leave the ``pressure`` at
``0`` (the default), thereby disabling the refraction correction and
yielding "topocentric" equatorial coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class HADec(BaseCoordinateFrame):
"""
A coordinate or frame in the Hour Angle-Declination system (Equatorial
coordinates) with respect to the WGS84 ellipsoid. Hour Angle is oriented
with respect to upper culmination such that the hour angle is negative to
the East and positive to the West.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from HADec to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "ha", u.hourangle),
RepresentationMapping("lat", "dec"),
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1 * u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.has_data:
self._set_data_lon_wrap_angle(self.data)
@staticmethod
def _set_data_lon_wrap_angle(data):
if hasattr(data, "lon"):
data.lon.wrap_angle = 180.0 * u.deg
return data
def represent_as(self, base, s="base", in_frame_units=False):
"""
Ensure the wrap angle is set for any spherical
representations.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_data_lon_wrap_angle(data)
return data
# self-transform defined in icrs_observed_transforms.py
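# A small usage sketch (illustrative, not part of the original module; the
# values are arbitrary): per the sign convention above, an object one hour
# past upper culmination (i.e. to the West) has a positive hour angle.
if __name__ == "__main__":
    coo = HADec(ha=1 * u.hourangle, dec=-20 * u.deg)
    print(coo.ha, coo.dec)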
|
5910a3549f59b757ff860c961f7bb82ce89b8c9be188c76b9d18a19db369de60 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk4 import FK4NoETerms
from .fk5 import FK5
from .utils import EQUINOX_B1950, EQUINOX_J2000
# FK5 to/from FK4 ------------------->
# B1950->J2000 matrix from Murray 1989 A&A 218,325 eqn 28
_B1950_TO_J2000_M = np.array(
[
[0.9999256794956877, -0.0111814832204662, -0.0048590038153592],
[0.0111814832391717, +0.9999374848933135, -0.0000271625947142],
[0.0048590037723143, -0.0000271702937440, +0.9999881946023742],
]
)
_FK4_CORR = (
np.array(
[
[-0.0026455262, -1.1539918689, +2.1111346190],
[+1.1540628161, -0.0129042997, +0.0236021478],
[-2.1112979048, -0.0056024448, +0.0102587734],
]
)
* 1.0e-6
)
def _fk4_B_matrix(obstime):
"""
This is a correction term in the FK4 transformations because FK4 is a
rotating system - see Murray 89 eqn 29
"""
# Note this is *julian century*, not besselian
T = (obstime.jyear - 1950.0) / 100.0
if getattr(T, "shape", ()):
# Ensure we broadcast possibly arrays of times properly.
T.shape += (1, 1)
return _B1950_TO_J2000_M + _FK4_CORR * T
# This transformation can't be static because the observation date is needed.
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK5)
def fk4_no_e_to_fk5(fk4noecoord, fk5frame):
# Correction terms for FK4 being a rotating system
B = _fk4_B_matrix(fk4noecoord.obstime)
# construct both precession matrices - if the equinoxes are B1950 and
# J2000, these are just identity matrices
pmat1 = fk4noecoord._precession_matrix(fk4noecoord.equinox, EQUINOX_B1950)
pmat2 = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
return pmat2 @ B @ pmat1
# This transformation can't be static because the observation date is needed.
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK4NoETerms)
def fk5_to_fk4_no_e(fk5coord, fk4noeframe):
# Get the transposed version of the rotating correction terms... so with the
# transpose this takes us from FK5/J2000 to FK4/B1950
B = matrix_transpose(_fk4_B_matrix(fk4noeframe.obstime))
# construct both precession matrices - if the equinoxes are B1950 and
# J2000, these are just identity matrices
pmat1 = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
pmat2 = fk4noeframe._precession_matrix(EQUINOX_B1950, fk4noeframe.equinox)
return pmat2 @ B @ pmat1
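# An illustrative usage sketch (not part of the original module; the position
# and obstime are arbitrary example values): converting a B1950 FK4 (no
# E-terms) position to FK5/J2000 applies the Murray (1989) matrix plus the
# time-dependent correction constructed above.
if __name__ == "__main__":
    from astropy import units as u

    old = FK4NoETerms(ra=45 * u.deg, dec=30 * u.deg, obstime="B1975")
    new = old.transform_to(FK5(equinox="J2000"))
    print(new.ra, new.dec)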
|
ba17a1341ba3f1c2113e65a7acf14ee1fee9ef362fd8e9c2dc3b23ad12247477 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import DifferentialAttribute
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping,
base_doc,
frame_transform_graph,
)
from astropy.coordinates.transformations import AffineTransform
from astropy.time import Time
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame
from .baseradec import doc_components as doc_components_radec
from .galactic import Galactic
from .icrs import ICRS
# For speed
J2000 = Time("J2000")
v_bary_Schoenrich2010 = r.CartesianDifferential([11.1, 12.24, 7.25] * u.km / u.s)
__all__ = ["LSR", "GalacticLSR", "LSRK", "LSRD"]
doc_footer_lsr = """
Other parameters
----------------
v_bary : `~astropy.coordinates.CartesianDifferential`
The velocity of the solar system barycenter with respect to the LSR, in
Galactic cartesian velocity components.
"""
@format_doc(base_doc, components=doc_components_radec, footer=doc_footer_lsr)
class LSR(BaseRADecFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR).
This coordinate frame is axis-aligned and co-spatial with
`~astropy.coordinates.ICRS`, but has a velocity offset relative to the
solar system barycenter to remove the peculiar motion of the sun relative
to the LSR. Roughly, the LSR is the mean velocity of the stars in the solar
neighborhood, but its precise definition depends on the study. As
defined in Schönrich et al. (2010): "The LSR is the rest frame at the
location of the Sun of a star that would be on a circular orbit in the
gravitational potential one would obtain by azimuthally averaging away
non-axisymmetric features in the actual Galactic potential." No such orbit
truly exists, but it is still a commonly used velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
# frame attributes:
v_bary = DifferentialAttribute(
default=v_bary_Schoenrich2010, allowed_classes=[r.CartesianDifferential]
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSR)
def icrs_to_lsr(icrs_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_coord)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, LSR, ICRS)
def lsr_to_icrs(lsr_coord, icrs_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_frame)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=-v_offset)
return None, offset
# ------------------------------------------------------------------------------
doc_components_gal = """
l : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
(``representation`` must be None).
pm_l_cosb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components_gal, footer=doc_footer_lsr)
class GalacticLSR(BaseCoordinateFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR), axis-aligned
to the Galactic frame.
This coordinate frame is axis-aligned and co-spatial with
`~astropy.coordinates.Galactic`, but has a velocity offset relative to the
solar system barycenter to remove the peculiar motion of the sun relative
to the LSR. Roughly, the LSR is the mean velocity of the stars in the solar
neighborhood, but its precise definition depends on the study. As
defined in Schönrich et al. (2010): "The LSR is the rest frame at the
location of the Sun of a star that would be on a circular orbit in the
gravitational potential one would obtain by azimuthally averaging away
non-axisymmetric features in the actual Galactic potential." No such orbit
truly exists, but it is still a commonly used velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "l"),
RepresentationMapping("lat", "b"),
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# frame attributes:
v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010)
@frame_transform_graph.transform(AffineTransform, Galactic, GalacticLSR)
def galactic_to_galacticlsr(galactic_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, GalacticLSR, Galactic)
def galacticlsr_to_galactic(lsr_coord, galactic_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=-v_offset)
return None, offset
# ------------------------------------------------------------------------------
# The LSRK velocity frame, defined as having a velocity of 20 km/s towards
# RA=270 Dec=30 (B1900) relative to the solar system Barycenter. This is defined
# in:
#
# Gordon 1975, Methods of Experimental Physics: Volume 12:
# Astrophysics, Part C: Radio Observations - Section 6.1.5.
class LSRK(BaseRADecFrame):
r"""A coordinate or frame in the Kinematic Local Standard of Rest (LSR).
This frame is defined as having a velocity of 20 km/s towards RA=270 Dec=30
(B1900) relative to the solar system Barycenter. This is defined in:
Gordon 1975, Methods of Experimental Physics: Volume 12:
Astrophysics, Part C: Radio Observations - Section 6.1.5.
This coordinate frame is axis-aligned and co-spatial with
`~astropy.coordinates.ICRS`, but has a velocity offset relative to the
solar system barycenter to remove the peculiar motion of the sun relative
to the LSRK.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# GORDON1975_V_BARY = 20*u.km/u.s
# GORDON1975_DIRECTION = FK4(ra=270*u.deg, dec=30*u.deg, equinox='B1900')
# V_OFFSET_LSRK = ((GORDON1975_V_BARY * GORDON1975_DIRECTION.transform_to(ICRS()).data)
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRK = r.CartesianDifferential(
[0.28999706839034606, -17.317264789717928, 10.00141199546947] * u.km / u.s
)
ICRS_LSRK_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=V_OFFSET_LSRK
)
LSRK_ICRS_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=-V_OFFSET_LSRK
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRK)
def icrs_to_lsrk(icrs_coord, lsr_frame):
return None, ICRS_LSRK_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRK, ICRS)
def lsrk_to_icrs(lsr_coord, icrs_frame):
return None, LSRK_ICRS_OFFSET
# ------------------------------------------------------------------------------
# The LSRD velocity frame, defined as having a velocity of U=9 km/s,
# V=12 km/s, and W=7 km/s in Galactic coordinates, or equivalently
# 16.552945 km/s towards l=53.13, b=25.02. This is defined in:
#
# Delhaye 1965, Solar Motion and Velocity Distribution of
# Common Stars.
class LSRD(BaseRADecFrame):
r"""A coordinate or frame in the Dynamical Local Standard of Rest (LSRD)
This frame is defined as a velocity of U=9 km/s, V=12 km/s,
and W=7 km/s in Galactic coordinates or 16.552945 km/s
towards l=53.13 b=25.02. This is defined in:
Delhaye 1965, Solar Motion and Velocity Distribution of
Common Stars.
This coordinate frame is axis-aligned and co-spatial with
`~astropy.coordinates.ICRS`, but has a velocity offset relative to the
solar system barycenter to remove the peculiar motion of the sun relative
to the LSRD.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# V_BARY_DELHAYE1965 = r.CartesianDifferential([9, 12, 7] * u.km/u.s)
# V_OFFSET_LSRD = (Galactic(V_BARY_DELHAYE1965.to_cartesian()).transform_to(ICRS()).data
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRD = r.CartesianDifferential(
[-0.6382306360182073, -14.585424483191094, 7.8011572411006815] * u.km / u.s
)
ICRS_LSRD_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=V_OFFSET_LSRD
)
LSRD_ICRS_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=-V_OFFSET_LSRD
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRD)
def icrs_to_lsrd(icrs_coord, lsr_frame):
return None, ICRS_LSRD_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRD, ICRS)
def lsrd_to_icrs(lsr_coord, icrs_frame):
return None, LSRD_ICRS_OFFSET
# ------------------------------------------------------------------------------
# Create loopback transformations
frame_transform_graph._add_merged_transform(LSR, ICRS, LSR)
frame_transform_graph._add_merged_transform(GalacticLSR, Galactic, GalacticLSR)
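# A usage sketch (illustrative, not part of the original module; the 6D
# coordinate is an arbitrary example): the LSR transforms are pure velocity
# offsets, so the position is unchanged while the radial velocity shifts by
# the solar motion projected on the line of sight.
if __name__ == "__main__":
    icrs = ICRS(
        ra=10 * u.deg,
        dec=20 * u.deg,
        distance=1 * u.kpc,
        pm_ra_cosdec=0 * u.mas / u.yr,
        pm_dec=0 * u.mas / u.yr,
        radial_velocity=0 * u.km / u.s,
    )
    lsr = icrs.transform_to(LSR())
    print(lsr.radial_velocity)  # nonzero: the projection of v_bary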
|
1589940eb8e2b7964b6af5d666dde599b17192fb15e964b37a61e81211fed3b5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk4 import FK4NoETerms
from .fk5 import FK5
from .galactic import Galactic
from .utils import EQUINOX_B1950, EQUINOX_J2000
# Galactic to/from FK4/FK5 ----------------------->
# can't be static because the equinox is needed
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, Galactic)
def fk5_to_gal(fk5coord, galframe):
# need to precess to J2000 first
return (
rotation_matrix(180 - Galactic._lon0_J2000.degree, "z")
@ rotation_matrix(90 - Galactic._ngp_J2000.dec.degree, "y")
@ rotation_matrix(Galactic._ngp_J2000.ra.degree, "z")
@ fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
)
@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK5)
def _gal_to_fk5(galcoord, fk5frame):
return matrix_transpose(fk5_to_gal(fk5frame, galcoord))
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, Galactic)
def fk4_to_gal(fk4coords, galframe):
return (
rotation_matrix(180 - Galactic._lon0_B1950.degree, "z")
@ rotation_matrix(90 - Galactic._ngp_B1950.dec.degree, "y")
@ rotation_matrix(Galactic._ngp_B1950.ra.degree, "z")
@ fk4coords._precession_matrix(fk4coords.equinox, EQUINOX_B1950)
)
@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK4NoETerms)
def gal_to_fk4(galcoords, fk4frame):
return matrix_transpose(fk4_to_gal(fk4frame, galcoords))
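# An illustrative check (a sketch, not part of the original module): the
# rotation built in ``fk5_to_gal`` sends the J2000 position of the north
# galactic pole to b = 90 deg by construction.
if __name__ == "__main__":
    from astropy import units as u

    ngp = FK5(ra=192.8594812065348 * u.deg, dec=27.12825118085622 * u.deg)
    print(ngp.transform_to(Galactic()).b)  # ~90 deg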
|
2d15495c9df319d401e1e43de90f6ac7ccb458d6ea21bed2d659f5ad7a20a26d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.angles import Angle
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping,
base_doc,
)
from astropy.utils.decorators import format_doc
from .fk4 import FK4NoETerms
# these are needed for defining the NGP
from .fk5 import FK5
__all__ = ["Galactic"]
doc_components = """
l : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_l_cosb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
doc_footer = """
Notes
-----
.. [1] Blaauw, A.; Gum, C. S.; Pawsey, J. L.; Westerhout, G. (1960), "The
new I.A.U. system of galactic coordinates (1958 revision),"
`MNRAS, Vol 121, pp.123 <https://ui.adsabs.harvard.edu/abs/1960MNRAS.121..123B>`_.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class Galactic(BaseCoordinateFrame):
"""
A coordinate or frame in the Galactic coordinate system.
This frame is used in a variety of Galactic contexts because it has as its
x-y plane the plane of the Milky Way. The positive x direction (i.e., the
l=0, b=0 direction) points to the center of the Milky Way and the z-axis
points toward the North Galactic Pole (following the IAU's 1958 definition
[1]_). However, unlike the `~astropy.coordinates.Galactocentric` frame, the
*origin* of this frame in 3D space is the solar system barycenter, not
the center of the Milky Way.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "l"),
RepresentationMapping("lat", "b"),
],
r.CartesianRepresentation: [
RepresentationMapping("x", "u"),
RepresentationMapping("y", "v"),
RepresentationMapping("z", "w"),
],
r.CartesianDifferential: [
RepresentationMapping("d_x", "U", u.km / u.s),
RepresentationMapping("d_y", "V", u.km / u.s),
RepresentationMapping("d_z", "W", u.km / u.s),
],
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# North galactic pole and zeropoint of l in FK4/FK5 coordinates. Needed for
# transformations to/from FK4/5
# These are from the IAU's definition of galactic coordinates
_ngp_B1950 = FK4NoETerms(ra=192.25 * u.degree, dec=27.4 * u.degree)
_lon0_B1950 = Angle(123, u.degree)
# These are *not* from Reid & Brunthaler 2004 - instead, they were
# derived by doing:
#
# >>> FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree).transform_to(FK5())
#
# This gives better consistency with other codes than using the values
# from Reid & Brunthaler 2004 and the best self-consistency between FK5
# -> Galactic and FK5 -> FK4 -> Galactic. The lon0 angle was found by
# optimizing the self-consistency.
_ngp_J2000 = FK5(ra=192.8594812065348 * u.degree, dec=27.12825118085622 * u.degree)
_lon0_J2000 = Angle(122.9319185680026, u.degree)
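# A small usage sketch (illustrative, not part of the original module; the
# coordinate values are arbitrary): spherical data uses the ``l``/``b`` names
# mapped above, and full 3D velocities come out as the cartesian U/V/W
# components in km/s.
if __name__ == "__main__":
    gal = Galactic(
        l=120 * u.deg,
        b=-5 * u.deg,
        distance=1 * u.kpc,
        pm_l_cosb=1 * u.mas / u.yr,
        pm_b=2 * u.mas / u.yr,
        radial_velocity=10 * u.km / u.s,
    )
    print(gal.velocity)  # CartesianDifferential (U, V, W) in km / s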
|
dcfeb3e4699ebdab29f6f3ee7a1f6ae92cdbd8a98f1bf264f8536e57c0146dd1 | import erfa
import numpy as np
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.representation import CartesianRepresentation
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from .altaz import AltAz
from .hadec import HADec
from .itrs import ITRS
# Minimum cos(alt) and sin(alt) for refraction purposes
CELMIN = 1e-6
SELMIN = 0.05
# Latitude of the north pole.
NORTH_POLE = 90.0 * u.deg
def itrs_to_altaz_mat(lon, lat):
# form ITRS to AltAz matrix
# AltAz frame is left handed
minus_x = np.eye(3)
minus_x[0][0] = -1.0
mat = minus_x @ rotation_matrix(NORTH_POLE - lat, "y") @ rotation_matrix(lon, "z")
return mat
def itrs_to_hadec_mat(lon):
# form ITRS to HADec matrix
# HADec frame is left handed
minus_y = np.eye(3)
minus_y[1][1] = -1.0
mat = minus_y @ rotation_matrix(lon, "z")
return mat
def altaz_to_hadec_mat(lat):
# form AltAz to HADec matrix
z180 = np.eye(3)
z180[0][0] = -1.0
z180[1][1] = -1.0
mat = z180 @ rotation_matrix(NORTH_POLE - lat, "y")
return mat
def add_refraction(aa_crepr, observed_frame):
# add refraction to AltAz cartesian representation
refa, refb = erfa.refco(
observed_frame.pressure.to_value(u.hPa),
observed_frame.temperature.to_value(u.deg_C),
observed_frame.relative_humidity.value,
observed_frame.obswl.to_value(u.micron),
)
# reference: erfa.atioq()
norm, uv = erfa.pn(aa_crepr.get_xyz(xyz_axis=-1).to_value())
# Cosine and sine of altitude, with precautions.
sel = np.maximum(uv[..., 2], SELMIN)
cel = np.maximum(np.sqrt(uv[..., 0] ** 2 + uv[..., 1] ** 2), CELMIN)
# A*tan(z)+B*tan^3(z) model, with Newton-Raphson correction.
tan_z = cel / sel
w = refb * tan_z**2
delta_el = (refa + w) * tan_z / (1.0 + (refa + 3.0 * w) / (sel**2))
# Apply the change, giving observed vector
cosdel = 1.0 - 0.5 * delta_el**2
f = cosdel - delta_el * sel / cel
uv[..., 0] *= f
uv[..., 1] *= f
uv[..., 2] = cosdel * uv[..., 2] + delta_el * cel
# Need to renormalize to get agreement with CIRS->Observed on distance
norm2, uv = erfa.pn(uv)
uv = erfa.sxp(norm, uv)
return CartesianRepresentation(uv, xyz_axis=-1, unit=aa_crepr.x.unit, copy=False)
def remove_refraction(aa_crepr, observed_frame):
# remove refraction from AltAz cartesian representation
refa, refb = erfa.refco(
observed_frame.pressure.to_value(u.hPa),
observed_frame.temperature.to_value(u.deg_C),
observed_frame.relative_humidity.value,
observed_frame.obswl.to_value(u.micron),
)
# reference: erfa.atoiq()
norm, uv = erfa.pn(aa_crepr.get_xyz(xyz_axis=-1).to_value())
# Cosine and sine of altitude, with precautions.
sel = np.maximum(uv[..., 2], SELMIN)
cel = np.sqrt(uv[..., 0] ** 2 + uv[..., 1] ** 2)
# A*tan(z)+B*tan^3(z) model
tan_z = cel / sel
delta_el = (refa + refb * tan_z**2) * tan_z
# Apply the change, giving observed vector.
az, el = erfa.c2s(uv)
el -= delta_el
uv = erfa.s2c(az, el)
uv = erfa.sxp(norm, uv)
return CartesianRepresentation(uv, xyz_axis=-1, unit=aa_crepr.x.unit, copy=False)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, AltAz)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, HADec)
def itrs_to_observed(itrs_coo, observed_frame):
if np.any(itrs_coo.location != observed_frame.location) or np.any(
itrs_coo.obstime != observed_frame.obstime
):
# This transform will go through the CIRS and alter stellar aberration.
itrs_coo = itrs_coo.transform_to(
ITRS(obstime=observed_frame.obstime, location=observed_frame.location)
)
lon, lat, height = observed_frame.location.to_geodetic("WGS84")
if isinstance(observed_frame, AltAz) or (observed_frame.pressure > 0.0):
crepr = itrs_coo.cartesian.transform(itrs_to_altaz_mat(lon, lat))
if observed_frame.pressure > 0.0:
crepr = add_refraction(crepr, observed_frame)
if isinstance(observed_frame, HADec):
crepr = crepr.transform(altaz_to_hadec_mat(lat))
else:
crepr = itrs_coo.cartesian.transform(itrs_to_hadec_mat(lon))
return observed_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, ITRS)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HADec, ITRS)
def observed_to_itrs(observed_coo, itrs_frame):
lon, lat, height = observed_coo.location.to_geodetic("WGS84")
if isinstance(observed_coo, AltAz) or (observed_coo.pressure > 0.0):
crepr = observed_coo.cartesian
if observed_coo.pressure > 0.0:
if isinstance(observed_coo, HADec):
crepr = crepr.transform(matrix_transpose(altaz_to_hadec_mat(lat)))
crepr = remove_refraction(crepr, observed_coo)
crepr = crepr.transform(matrix_transpose(itrs_to_altaz_mat(lon, lat)))
else:
crepr = observed_coo.cartesian.transform(
matrix_transpose(itrs_to_hadec_mat(lon))
)
itrs_at_obs_time = ITRS(
crepr, obstime=observed_coo.obstime, location=observed_coo.location
)
# This final transform may be a no-op if the obstimes and locations are the same.
# Otherwise, this transform will go through the CIRS and alter stellar aberration.
return itrs_at_obs_time.transform_to(itrs_frame)
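# An illustrative usage sketch (not part of the original module; the observer
# and landmark are arbitrary example values): these transforms treat ITRS
# positions as time invariant, so a nearby fixed landmark can be expressed in
# topocentric ITRS and sent straight to AltAz without aberration corrections.
if __name__ == "__main__":
    from astropy.coordinates import EarthLocation
    from astropy.time import Time

    t = Time("2021-06-01T12:00:00")
    observer = EarthLocation(lat=52.0 * u.deg, lon=5.0 * u.deg, height=0 * u.m)
    landmark = EarthLocation(lat=52.1 * u.deg, lon=5.1 * u.deg, height=100 * u.m)
    # topocentric ITRS: the vector from the observer to the landmark
    topo_repr = landmark.get_itrs(t).cartesian - observer.get_itrs(t).cartesian
    itrs_topo = ITRS(topo_repr, obstime=t, location=observer)
    aa = itrs_topo.transform_to(AltAz(obstime=t, location=observer))
    print(aa.az, aa.alt)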
|
8fde57333439d643f911edc005a2794ce931a91c5f63f836d2f21f2e8d2ead43 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk5 import FK5
from .icrs import ICRS
from .utils import EQUINOX_J2000
def _icrs_to_fk5_matrix():
"""
B-matrix from USNO circular 179. Used by the ICRS->FK5 transformation
functions.
"""
eta0 = -19.9 / 3600000.0
xi0 = 9.1 / 3600000.0
da0 = -22.9 / 3600000.0
return (
rotation_matrix(-eta0, "x")
@ rotation_matrix(xi0, "y")
@ rotation_matrix(da0, "z")
)
# define this here because it only needs to be computed once
_ICRS_TO_FK5_J2000_MAT = _icrs_to_fk5_matrix()
@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, FK5)
def icrs_to_fk5(icrscoord, fk5frame):
# ICRS is by design very close to J2000 equinox
pmat = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
return pmat @ _ICRS_TO_FK5_J2000_MAT
# can't be static because the equinox is needed
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, ICRS)
def fk5_to_icrs(fk5coord, icrsframe):
# ICRS is by design very close to J2000 equinox
pmat = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
return matrix_transpose(_ICRS_TO_FK5_J2000_MAT) @ pmat
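# An illustrative check (a sketch, not part of the original module; the
# coordinate is arbitrary): ICRS and FK5 at J2000 differ only by the constant
# frame-bias matrix above, i.e. by a few tens of milliarcseconds.
if __name__ == "__main__":
    from astropy import units as u

    icrs = ICRS(ra=10 * u.deg, dec=20 * u.deg)
    fk5 = icrs.transform_to(FK5(equinox="J2000"))
    print((fk5.ra - icrs.ra).to(u.mas), (fk5.dec - icrs.dec).to(u.mas))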
|
834d4161ee63d72c4117d0b595f930f4f67b924f625e45345fdd2d13f0791619 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.attributes import EarthLocationAttribute, TimeAttribute
from astropy.coordinates.baseframe import base_doc
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame, doc_components
from .utils import DEFAULT_OBSTIME, EARTH_CENTER
__all__ = ["CIRS"]
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth and its precession.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame. The default is the
centre of the Earth.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class CIRS(BaseRADecFrame):
"""
A coordinate or frame in the Celestial Intermediate Reference System (CIRS).
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
location = EarthLocationAttribute(default=EARTH_CENTER)
# The "self-transform" is defined in icrs_cirs_transformations.py, because in
# the current implementation it goes through ICRS (like GCRS)
|
5e6efdaf2602b90cf9f21ac139e63e5e3b4666296e64c8a1ad5651995f7f6fa7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Coordinate frames tied to the Equator and Equinox of Earth.
TEME is a True equator, Mean Equinox coordinate frame used in NORAD TLE
satellite files.
TETE is a True equator, True Equinox coordinate frame often called the
"apparent" coordinates. It is the same frame as used by JPL Horizons
and can be combined with Local Apparent Sidereal Time to calculate the
hour angle.
"""
from astropy.coordinates.attributes import EarthLocationAttribute, TimeAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.coordinates.builtin_frames.baseradec import BaseRADecFrame, doc_components
from astropy.coordinates.representation import (
CartesianDifferential,
CartesianRepresentation,
)
from astropy.utils.decorators import format_doc
from .utils import DEFAULT_OBSTIME, EARTH_CENTER
__all__ = ["TEME", "TETE"]
doc_footer_teme = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the frame is defined. Used for determining the
position of the Earth.
"""
doc_footer_tete = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame. The default is the
centre of the Earth.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_tete)
class TETE(BaseRADecFrame):
"""
An equatorial coordinate or frame using the True Equator and True Equinox (TETE).
Equatorial coordinate frames measure RA with respect to the equinox and declination
with respect to the equator. The location of the equinox and equator vary due to
the gravitational torques on the oblate Earth. This variation is split into precession
and nutation, although really they are two aspects of a single phenomenon. The smooth,
long-term variation is known as precession, whilst the smaller, periodic components are
called nutation.
Calculation of the true equator and equinox involves the application of both precession
and nutation, whilst only applying precession gives a mean equator and equinox.
TETE coordinates are often referred to as "apparent" coordinates, or
"apparent place". TETE is the apparent coordinate system used by JPL Horizons
and is the correct coordinate system to use when combining the right ascension
with local apparent sidereal time to calculate the apparent (TIRS) hour angle.
For more background on TETE, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation.
Of particular note are Sections 5 and 6 of
`USNO Circular 179 <https://arxiv.org/abs/astro-ph/0602086>`_ and
especially the diagram at the top of page 57.
This frame also includes frames that are defined *relative* to the center of the Earth,
but that are offset (in both position and velocity) from the center of the Earth. You
may see such non-geocentric coordinates referred to as "topocentric".
The frame attributes are listed under **Other Parameters**.
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
location = EarthLocationAttribute(default=EARTH_CENTER)
# Self transform goes through ICRS and is defined in icrs_cirs_transforms.py
@format_doc(base_doc, components="", footer=doc_footer_teme)
class TEME(BaseCoordinateFrame):
"""
A coordinate or frame in the True Equator Mean Equinox frame (TEME).
This frame is a geocentric system similar to CIRS or geocentric apparent place,
except that the mean sidereal time is used to rotate from TIRS. TEME coordinates
are most often used in combination with orbital data for satellites in the
two-line element (TLE) format.
Different implementations of the TEME frame exist. For clarity, this frame follows the
conventions and relations to other frames that are set out in Vallado et al. (2006).
For more background on TEME, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation.
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
obstime = TimeAttribute()
# Transformation functions for getting to/from TEME and ITRS are in
# intermediate_rotation_transforms.py
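# A usage sketch (illustrative, not part of the original module; the state
# vector and epoch are arbitrary example values): TEME takes cartesian data,
# as typically produced by an SGP4 propagator from a TLE.
if __name__ == "__main__":
    from astropy import units as u
    from astropy.time import Time

    t = Time("2021-03-01T00:00:00")
    teme = TEME(CartesianRepresentation([2000.0, 5000.0, 4000.0] * u.km), obstime=t)
    print(teme.cartesian.norm())  # geocentric distance of the satellite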
|
ec13f3de1d92cfd13840cd73b08dbbf60a0e61356769e716ff6f987d77f89029 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from copy import deepcopy
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord, galactocentric_frame_defaults
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import (
Attribute,
CoordinateAttribute,
DifferentialAttribute,
EarthLocationAttribute,
QuantityAttribute,
TimeAttribute,
)
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.builtin_frames import (
FK4,
FK5,
GCRS,
HCRS,
ICRS,
ITRS,
AltAz,
Galactic,
Galactocentric,
HADec,
)
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES,
CartesianDifferential,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from .test_representation import unitphysics # this fixture is used below # noqa: F401
def setup_function(func):
"""Copy original 'REPRESENTATIONCLASSES' as attribute in function."""
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
"""Reset REPRESENTATION_CLASSES to original value."""
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
def test_frame_attribute_descriptor():
"""Unit tests of the Attribute descriptor."""
class TestAttributes:
attr_none = Attribute()
attr_2 = Attribute(default=2)
attr_3_attr2 = Attribute(default=3, secondary_attribute="attr_2")
attr_none_attr2 = Attribute(default=None, secondary_attribute="attr_2")
attr_none_nonexist = Attribute(default=None, secondary_attribute="nonexist")
t = TestAttributes()
# Defaults
assert t.attr_none is None
assert t.attr_2 == 2
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
assert t.attr_none_nonexist is None # No default and non-existent secondary attr
# Setting values via '_'-prefixed internal vars
# (as would normally be done in __init__)
t._attr_none = 10
assert t.attr_none == 10
t._attr_2 = 20
assert t.attr_2 == 20
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
t._attr_none_attr2 = 40
assert t.attr_none_attr2 == 40
# Make sure setting values via public attribute fails
with pytest.raises(AttributeError) as err:
t.attr_none = 5
assert "Cannot set frame attribute" in str(err.value)
def test_frame_subclass_attribute_descriptor():
"""Unit test of the attribute descriptors in subclasses."""
_EQUINOX_B1980 = Time("B1980", scale="tai")
class MyFK4(FK4):
# equinox inherited from FK4, obstime overridden, and newattr is new
obstime = TimeAttribute(default=_EQUINOX_B1980)
newattr = Attribute(default="newattr")
mfk4 = MyFK4()
assert mfk4.equinox.value == "B1950.000"
assert mfk4.obstime.value == "B1980.000"
assert mfk4.newattr == "newattr"
with pytest.warns(AstropyDeprecationWarning):
assert set(mfk4.get_frame_attr_names()) == {"equinox", "obstime", "newattr"}
mfk4 = MyFK4(equinox="J1980.0", obstime="J1990.0", newattr="world")
assert mfk4.equinox.value == "J1980.000"
assert mfk4.obstime.value == "J1990.000"
assert mfk4.newattr == "world"
def test_frame_multiple_inheritance_attribute_descriptor():
"""
Ensure that all attributes are accumulated in case of inheritance from
multiple BaseCoordinateFrames. See
https://github.com/astropy/astropy/pull/11099#issuecomment-735829157
"""
class Frame1(BaseCoordinateFrame):
attr1 = Attribute()
class Frame2(BaseCoordinateFrame):
attr2 = Attribute()
class Frame3(Frame1, Frame2):
pass
assert len(Frame3.frame_attributes) == 2
assert "attr1" in Frame3.frame_attributes
assert "attr2" in Frame3.frame_attributes
# In case the same attribute exists in both frames, the one from the
# left-most class in the MRO should take precedence
class Frame4(BaseCoordinateFrame):
attr1 = Attribute()
attr2 = Attribute()
class Frame5(Frame1, Frame4):
pass
assert Frame5.frame_attributes["attr1"] is Frame1.frame_attributes["attr1"]
assert Frame5.frame_attributes["attr2"] is Frame4.frame_attributes["attr2"]
def test_differentialattribute():
# Test logic of passing input through to allowed class
vel = [1, 2, 3] * u.km / u.s
dif = r.CartesianDifferential(vel)
class TestFrame(BaseCoordinateFrame):
attrtest = DifferentialAttribute(
default=dif, allowed_classes=[r.CartesianDifferential]
)
frame1 = TestFrame()
frame2 = TestFrame(attrtest=dif)
frame3 = TestFrame(attrtest=vel)
assert np.all(frame1.attrtest.d_xyz == frame2.attrtest.d_xyz)
assert np.all(frame1.attrtest.d_xyz == frame3.attrtest.d_xyz)
# This shouldn't work if there is more than one allowed class:
class TestFrame2(BaseCoordinateFrame):
attrtest = DifferentialAttribute(
default=dif,
allowed_classes=[r.CartesianDifferential, r.CylindricalDifferential],
)
frame1 = TestFrame2()
frame2 = TestFrame2(attrtest=dif)
with pytest.raises(TypeError):
TestFrame2(attrtest=vel)
def test_create_data_frames():
# from repr
i1 = ICRS(r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc))
i2 = ICRS(r.UnitSphericalRepresentation(lon=1 * u.deg, lat=2 * u.deg))
# from preferred name
i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
i4 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
assert i1.data.lat == i3.data.lat
assert i1.data.lon == i3.data.lon
assert i1.data.distance == i3.data.distance
assert i2.data.lat == i4.data.lat
assert i2.data.lon == i4.data.lon
# now make sure the preferred names work as properties
assert_allclose(i1.ra, i3.ra)
assert_allclose(i2.ra, i4.ra)
assert_allclose(i1.distance, i3.distance)
with pytest.raises(AttributeError):
i1.ra = [11.0] * u.deg
def test_create_ordered_data():
TOL = 1e-10 * u.deg
i = ICRS(1 * u.deg, 2 * u.deg)
assert (i.ra - 1 * u.deg) < TOL
assert (i.dec - 2 * u.deg) < TOL
g = Galactic(1 * u.deg, 2 * u.deg)
assert (g.l - 1 * u.deg) < TOL
assert (g.b - 2 * u.deg) < TOL
a = AltAz(1 * u.deg, 2 * u.deg)
assert (a.az - 1 * u.deg) < TOL
assert (a.alt - 2 * u.deg) < TOL
with pytest.raises(TypeError):
ICRS(1 * u.deg, 2 * u.deg, 1 * u.deg, 2 * u.deg)
with pytest.raises(TypeError):
sph = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
ICRS(sph, 1 * u.deg, 2 * u.deg)
def test_create_nodata_frames():
i = ICRS()
assert len(i.frame_attributes) == 0
f5 = FK5()
assert f5.equinox == FK5.get_frame_attr_defaults()["equinox"]
f4 = FK4()
assert f4.equinox == FK4.get_frame_attr_defaults()["equinox"]
# obstime is special because it's a property that uses equinox if obstime is not set
assert f4.obstime in (
FK4.get_frame_attr_defaults()["obstime"],
FK4.get_frame_attr_defaults()["equinox"],
)
def test_no_data_nonscalar_frames():
a1 = AltAz(
obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
temperature=np.ones((3, 1)) * u.deg_C,
)
assert a1.obstime.shape == (3, 10)
assert a1.temperature.shape == (3, 10)
assert a1.shape == (3, 10)
with pytest.raises(ValueError) as exc:
AltAz(
obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
temperature=np.ones((3,)) * u.deg_C,
)
assert "inconsistent shapes" in str(exc.value)
def test_frame_repr():
i = ICRS()
assert repr(i) == "<ICRS Frame>"
f5 = FK5()
assert repr(f5).startswith("<FK5 Frame (equinox=")
i2 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
assert repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n (1., 2.)>"
assert (
repr(i3)
== "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n (1., 2., 3.)>"
)
# try with arrays
i2 = ICRS(ra=[1.1, 2.1] * u.deg, dec=[2.1, 3.1] * u.deg)
i3 = ICRS(
ra=[1.1, 2.1] * u.deg, dec=[-15.6, 17.1] * u.deg, distance=[11.0, 21.0] * u.kpc
)
assert (
repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n [(1.1, 2.1), (2.1, 3.1)]>"
)
assert (
repr(i3) == "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n"
" [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>"
)
def test_frame_repr_vels():
i = ICRS(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=1 * u.marcsec / u.yr,
pm_dec=2 * u.marcsec / u.yr,
)
# unit comes out as mas/yr because of the preferred units defined in the
# frame RepresentationMapping
assert (
repr(i) == "<ICRS Coordinate: (ra, dec) in deg\n"
" (1., 2.)\n"
" (pm_ra_cosdec, pm_dec) in mas / yr\n"
" (1., 2.)>"
)
def test_converting_units():
    # this regular expression, used with split() below, truncates each value
    # right after its decimal point so rounding differences do not matter
rexrepr = re.compile(r"(.*?=\d\.).*?( .*?=\d\.).*?( .*)")
# Use values that aren't subject to rounding down to X.9999...
i2 = ICRS(ra=2.0 * u.deg, dec=2.0 * u.deg)
i2_many = ICRS(ra=[2.0, 4.0] * u.deg, dec=[2.0, -8.1] * u.deg)
# converting from FK5 to ICRS and back changes the *internal* representation,
# but it should still come out in the preferred form
i4 = i2.transform_to(FK5()).transform_to(ICRS())
i4_many = i2_many.transform_to(FK5()).transform_to(ICRS())
ri2 = "".join(rexrepr.split(repr(i2)))
ri4 = "".join(rexrepr.split(repr(i4)))
assert ri2 == ri4
assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed
ri2_many = "".join(rexrepr.split(repr(i2_many)))
ri4_many = "".join(rexrepr.split(repr(i4_many)))
assert ri2_many == ri4_many
assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed
# but that *shouldn't* hold if we turn off units for the representation
class FakeICRS(ICRS):
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ra", u.hourangle),
RepresentationMapping("lat", "dec", None),
RepresentationMapping("distance", "distance"),
] # should fall back to default of None unit
}
fi = FakeICRS(i4.data)
ri2 = "".join(rexrepr.split(repr(i2)))
rfi = "".join(rexrepr.split(repr(fi)))
rfi = re.sub("FakeICRS", "ICRS", rfi) # Force frame name to match
assert ri2 != rfi
# the attributes should also get the right units
assert i2.dec.unit == i4.dec.unit
# unless no/explicitly given units
assert i2.dec.unit != fi.dec.unit
assert i2.ra.unit != fi.ra.unit
assert fi.ra.unit == u.hourangle
def test_representation_info():
class NewICRS1(ICRS):
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "rara", u.hourangle),
RepresentationMapping("lat", "decdec", u.degree),
RepresentationMapping("distance", "distance", u.kpc),
]
}
i1 = NewICRS1(
rara=10 * u.degree,
decdec=-12 * u.deg,
distance=1000 * u.pc,
pm_rara_cosdecdec=100 * u.mas / u.yr,
pm_decdec=17 * u.mas / u.yr,
radial_velocity=10 * u.km / u.s,
)
assert allclose(i1.rara, 10 * u.deg)
assert i1.rara.unit == u.hourangle
assert allclose(i1.decdec, -12 * u.deg)
assert allclose(i1.distance, 1000 * u.pc)
assert i1.distance.unit == u.kpc
assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)
# this should auto-set the names of UnitSpherical:
i1.set_representation_cls(
r.UnitSphericalRepresentation, s=r.UnitSphericalCosLatDifferential
)
assert allclose(i1.rara, 10 * u.deg)
assert allclose(i1.decdec, -12 * u.deg)
assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)
# For backwards compatibility, we also support the string name in the
# representation info dictionary:
class NewICRS2(ICRS):
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ang1", u.hourangle),
RepresentationMapping("lat", "ang2", u.degree),
RepresentationMapping("distance", "howfar", u.kpc),
]
}
i2 = NewICRS2(ang1=10 * u.degree, ang2=-12 * u.deg, howfar=1000 * u.pc)
assert allclose(i2.ang1, 10 * u.deg)
assert i2.ang1.unit == u.hourangle
assert allclose(i2.ang2, -12 * u.deg)
assert allclose(i2.howfar, 1000 * u.pc)
assert i2.howfar.unit == u.kpc
# Test that the differential kwargs get overridden
class NewICRS3(ICRS):
frame_specific_representation_info = {
r.SphericalCosLatDifferential: [
RepresentationMapping("d_lon_coslat", "pm_ang1", u.hourangle / u.year),
RepresentationMapping("d_lat", "pm_ang2"),
RepresentationMapping("d_distance", "vlos", u.kpc / u.Myr),
]
}
i3 = NewICRS3(
lon=10 * u.degree,
lat=-12 * u.deg,
distance=1000 * u.pc,
pm_ang1=1 * u.mas / u.yr,
pm_ang2=2 * u.mas / u.yr,
vlos=100 * u.km / u.s,
)
assert allclose(i3.pm_ang1, 1 * u.mas / u.yr)
assert i3.pm_ang1.unit == u.hourangle / u.year
assert allclose(i3.pm_ang2, 2 * u.mas / u.yr)
assert allclose(i3.vlos, 100 * u.km / u.s)
assert i3.vlos.unit == u.kpc / u.Myr
def test_realizing():
rep = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
i = ICRS()
i2 = i.realize_frame(rep)
assert not i.has_data
assert i2.has_data
f = FK5(equinox=Time("J2001"))
f2 = f.realize_frame(rep)
assert not f.has_data
assert f2.has_data
assert f2.equinox == f.equinox
assert f2.equinox != FK5.get_frame_attr_defaults()["equinox"]
# Check that a nicer error message is returned:
with pytest.raises(
TypeError, match="Class passed as data instead of a representation"
):
f.realize_frame(f.representation_type)
def test_replicating():
i = ICRS(ra=[1] * u.deg, dec=[2] * u.deg)
icopy = i.replicate(copy=True)
irepl = i.replicate(copy=False)
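    # mutate the underlying data: the no-copy replica shares memory, the copy does not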
i.data._lat[:] = 0 * u.deg
assert np.all(i.data.lat == irepl.data.lat)
assert np.all(i.data.lat != icopy.data.lat)
iclone = i.replicate_without_data()
assert i.has_data
assert not iclone.has_data
aa = AltAz(alt=1 * u.deg, az=2 * u.deg, obstime=Time("J2000"))
aaclone = aa.replicate_without_data(obstime=Time("J2001"))
assert not aaclone.has_data
assert aa.obstime != aaclone.obstime
assert aa.pressure == aaclone.pressure
assert aa.obswl == aaclone.obswl
def test_getitem():
rep = r.SphericalRepresentation(
[1, 2, 3] * u.deg, [4, 5, 6] * u.deg, [7, 8, 9] * u.kpc
)
i = ICRS(rep)
assert len(i.ra) == 3
iidx = i[1:]
assert len(iidx.ra) == 2
iidx2 = i[0]
assert iidx2.ra.isscalar
def test_transform():
"""
This test just makes sure the transform architecture works, but does *not*
actually test all the builtin transforms themselves are accurate.
"""
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
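    # a round trip without a distance should stay unit-spherical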
assert i2.data.__class__ == r.UnitSphericalRepresentation
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[5, 6] * u.kpc)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ != r.UnitSphericalRepresentation
f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
f4 = f.transform_to(FK4())
f4_2 = f.transform_to(FK4(equinox=f.equinox))
# make sure attributes are copied over correctly
assert f4.equinox == FK4().equinox
assert f4_2.equinox == f.equinox
# make sure self-transforms also work
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
i2 = i.transform_to(ICRS())
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
f2 = f.transform_to(FK5()) # default equinox, so should be *different*
assert f2.equinox == FK5().equinox
with pytest.raises(AssertionError):
assert_allclose(f.ra, f2.ra)
with pytest.raises(AssertionError):
assert_allclose(f.dec, f2.dec)
# finally, check Galactic round-tripping
i1 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
i2 = i1.transform_to(Galactic()).transform_to(ICRS())
assert_allclose(i1.ra, i2.ra)
assert_allclose(i1.dec, i2.dec)
def test_transform_to_nonscalar_nodata_frame():
# https://github.com/astropy/astropy/pull/5254#issuecomment-241592353
times = Time("2016-08-23") + np.linspace(0, 10, 12) * u.day
coo1 = ICRS(
ra=[[0.0], [10.0], [20.0]] * u.deg, dec=[[-30.0], [30.0], [60.0]] * u.deg
)
coo2 = coo1.transform_to(FK5(equinox=times))
assert coo2.shape == (3, 12)
def test_setitem_no_velocity():
"""Test different flavors of item setting for a Frame without a velocity."""
obstime = "B1955"
sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)
sc1 = sc0.copy()
sc1_repr = repr(sc1)
assert "representation" in sc1.cache
sc1[1] = sc2[0]
assert sc1.cache == {}
    assert repr(sc1) != sc1_repr
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == sc2.obstime
assert sc1.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
# Works for array-valued obstime so long as they are considered equivalent
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, obstime])
sc1[0] = sc2[0]
# Multidimensional coordinates
sc1 = FK4([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc2 = FK4([[10, 20], [30, 40]] * u.deg, [[50, 60], [70, 80]] * u.deg)
sc1[0] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [[10, 20], [3, 4]])
assert np.allclose(sc1.dec.to_value(u.deg), [[50, 60], [7, 8]])
def test_setitem_velocities():
"""Test different flavors of item setting for a Frame with a velocity."""
sc0 = FK4(
[1, 2] * u.deg,
[3, 4] * u.deg,
radial_velocity=[1, 2] * u.km / u.s,
obstime="B1950",
)
sc2 = FK4(
[10, 20] * u.deg,
[30, 40] * u.deg,
radial_velocity=[10, 20] * u.km / u.s,
obstime="B1950",
)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == sc2.obstime
assert sc1.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
obstime = "B1950"
sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)
sc1 = Galactic(sc0.ra, sc0.dec)
with pytest.raises(
TypeError, match="can only set from object of same class: Galactic vs. FK4"
):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime="B2001")
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra[0], sc0.dec[0], obstime=obstime)
with pytest.raises(
TypeError, match="scalar 'FK4' frame object does not support item assignment"
):
sc1[0] = sc2[0]
sc1 = FK4(obstime=obstime)
with pytest.raises(ValueError, match="cannot set frame which has no data"):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, "B1980"])
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
# Wrong shape
sc1 = FK4([sc0.ra], [sc0.dec], obstime=[obstime, "B1980"])
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
def test_sep():
i1 = ICRS(ra=0 * u.deg, dec=1 * u.deg)
i2 = ICRS(ra=0 * u.deg, dec=2 * u.deg)
sep = i1.separation(i2)
assert_allclose(sep.deg, 1.0)
i3 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[5, 6] * u.kpc)
i4 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[4, 5] * u.kpc)
sep3d = i3.separation_3d(i4)
assert_allclose(sep3d.to(u.kpc), np.array([1, 1]) * u.kpc)
# check that it works even with velocities
i5 = ICRS(
ra=[1, 2] * u.deg,
dec=[3, 4] * u.deg,
distance=[5, 6] * u.kpc,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
radial_velocity=[5, 6] * u.km / u.s,
)
i6 = ICRS(
ra=[1, 2] * u.deg,
dec=[3, 4] * u.deg,
distance=[7, 8] * u.kpc,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
radial_velocity=[5, 6] * u.km / u.s,
)
sep3d = i5.separation_3d(i6)
assert_allclose(sep3d.to(u.kpc), np.array([2, 2]) * u.kpc)
# 3d separations of dimensionless distances should still work
i7 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.one)
i8 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=4 * u.one)
sep3d = i7.separation_3d(i8)
assert_allclose(sep3d, 1 * u.one)
# but should fail with non-dimensionless
with pytest.raises(ValueError):
i7.separation_3d(i3)
def test_time_inputs():
"""
Test validation and conversion of inputs for equinox and obstime attributes.
"""
c = FK4(1 * u.deg, 2 * u.deg, equinox="J2001.5", obstime="2000-01-01 12:00:00")
assert c.equinox == Time("J2001.5")
assert c.obstime == Time("2000-01-01 12:00:00")
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5)
assert "Invalid time input" in str(err.value)
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, obstime="hello")
assert "Invalid time input" in str(err.value)
    # A vector time should work if the shapes match, but the basic data is not
    # automatically broadcast against it (just as Time itself does not broadcast).
FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=["J2000", "J2001"])
with pytest.raises(ValueError) as err:
FK4(1 * u.deg, 2 * u.deg, obstime=["J2000", "J2001"])
assert "shape" in str(err.value)
def test_is_frame_attr_default():
"""
Check that the `is_frame_attr_default` machinery works as expected
"""
c1 = FK5(ra=1 * u.deg, dec=1 * u.deg)
c2 = FK5(
ra=1 * u.deg, dec=1 * u.deg, equinox=FK5.get_frame_attr_defaults()["equinox"]
)
c3 = FK5(ra=1 * u.deg, dec=1 * u.deg, equinox=Time("J2001.5"))
assert c1.equinox == c2.equinox
assert c1.equinox != c3.equinox
assert c1.is_frame_attr_default("equinox")
assert not c2.is_frame_attr_default("equinox")
assert not c3.is_frame_attr_default("equinox")
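    # realize_frame should preserve whether each attribute was explicitly set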
c4 = c1.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
c5 = c2.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
assert c4.is_frame_attr_default("equinox")
assert not c5.is_frame_attr_default("equinox")
def test_altaz_attributes():
aa = AltAz(1 * u.deg, 2 * u.deg)
assert aa.obstime is None
assert aa.location is None
aa2 = AltAz(1 * u.deg, 2 * u.deg, obstime="J2000")
assert aa2.obstime == Time("J2000")
aa3 = AltAz(
1 * u.deg, 2 * u.deg, location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
)
assert isinstance(aa3.location, EarthLocation)
def test_hadec_attributes():
hd = HADec(1 * u.hourangle, 2 * u.deg)
assert hd.ha == 1.0 * u.hourangle
assert hd.dec == 2 * u.deg
assert hd.obstime is None
assert hd.location is None
hd2 = HADec(
23 * u.hourangle,
-2 * u.deg,
obstime="J2000",
location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m),
)
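    # the 23h hour angle wraps into the +/-12h range, coming out as -1h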
assert_allclose(hd2.ha, -1 * u.hourangle)
assert hd2.dec == -2 * u.deg
assert hd2.obstime == Time("J2000")
assert isinstance(hd2.location, EarthLocation)
sr = hd2.represent_as(r.SphericalRepresentation)
assert_allclose(sr.lon, -1 * u.hourangle)
def test_representation():
"""
Test the getter and setter properties for `representation`
"""
# Create the frame object.
icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
data = icrs.data
# Create some representation objects.
icrs_cart = icrs.cartesian
icrs_spher = icrs.spherical
icrs_cyl = icrs.cylindrical
# Testing when `_representation` set to `CartesianRepresentation`.
icrs.representation_type = r.CartesianRepresentation
assert icrs.representation_type == r.CartesianRepresentation
assert icrs_cart.x == icrs.x
assert icrs_cart.y == icrs.y
assert icrs_cart.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CartesianRepresentation must not have spherical attributes.
for attr in ("ra", "dec", "distance"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
# Testing when `_representation` set to `CylindricalRepresentation`.
icrs.representation_type = r.CylindricalRepresentation
assert icrs.representation_type == r.CylindricalRepresentation
assert icrs.data == data
# Testing setter input using text argument for spherical.
icrs.representation_type = "spherical"
assert icrs.representation_type is r.SphericalRepresentation
assert icrs_spher.lat == icrs.dec
assert icrs_spher.lon == icrs.ra
assert icrs_spher.distance == icrs.distance
assert icrs.data == data
# Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes.
for attr in ("x", "y", "z"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
# Testing setter input using text argument for cylindrical.
icrs.representation_type = "cylindrical"
assert icrs.representation_type is r.CylindricalRepresentation
assert icrs_cyl.rho == icrs.rho
assert icrs_cyl.phi == icrs.phi
assert icrs_cyl.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CylindricalRepresentation must not have spherical attributes.
for attr in ("ra", "dec", "distance"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
with pytest.raises(ValueError) as err:
icrs.representation_type = "WRONG"
assert "but must be a BaseRepresentation class" in str(err.value)
with pytest.raises(ValueError) as err:
icrs.representation_type = ICRS
assert "but must be a BaseRepresentation class" in str(err.value)
def test_represent_as():
icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
cart1 = icrs.represent_as("cartesian")
cart2 = icrs.represent_as(r.CartesianRepresentation)
    assert cart1.x == cart2.x
    assert cart1.y == cart2.y
    assert cart1.z == cart2.z
# now try with velocities
icrs = ICRS(
ra=0 * u.deg,
dec=0 * u.deg,
distance=10 * u.kpc,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=1 * u.km / u.s,
)
# single string
rep2 = icrs.represent_as("cylindrical")
assert isinstance(rep2, r.CylindricalRepresentation)
assert isinstance(rep2.differentials["s"], r.CylindricalDifferential)
# single class with positional in_frame_units, verify that warning raised
with pytest.warns(AstropyWarning, match="argument position") as w:
icrs.represent_as(r.CylindricalRepresentation, False)
assert len(w) == 1
# TODO: this should probably fail in the future once we figure out a better
# workaround for dealing with UnitSphericalRepresentation's with
# RadialDifferential's
# two classes
# rep2 = icrs.represent_as(r.CartesianRepresentation,
# r.SphericalCosLatDifferential)
# assert isinstance(rep2, r.CartesianRepresentation)
# assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
with pytest.raises(ValueError):
icrs.represent_as("odaigahara")
def test_shorthand_representations():
rep = r.CartesianRepresentation([1, 2, 3] * u.pc)
dif = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
rep = rep.with_differentials(dif)
icrs = ICRS(rep)
cyl = icrs.cylindrical
assert isinstance(cyl, r.CylindricalRepresentation)
assert isinstance(cyl.differentials["s"], r.CylindricalDifferential)
sph = icrs.spherical
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials["s"], r.SphericalDifferential)
sph = icrs.sphericalcoslat
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials["s"], r.SphericalCosLatDifferential)
def test_equal():
obstime = "B1955"
sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = FK4([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
sc2 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
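    # frames without data compare by frame equivalence, returning a plain bool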
assert (FK4() == ICRS()) is False
assert (FK4() == FK4(obstime="J1999")) is False
def test_equal_exceptions():
# Shape mismatch
sc1 = FK4([1, 2, 3] * u.deg, [3, 4, 5] * u.deg)
with pytest.raises(ValueError, match="cannot compare: shape mismatch"):
sc1 == sc1[:2]
# Different representation_type
sc1 = FK4(1, 2, 3, representation_type="cartesian")
sc2 = FK4(1 * u.deg, 2 * u.deg, 2, representation_type="spherical")
with pytest.raises(
TypeError,
match=(
"cannot compare: objects must have same "
"class: CartesianRepresentation vs. SphericalRepresentation"
),
):
sc1 == sc2
# Different differential type
sc1 = FK4(1 * u.deg, 2 * u.deg, radial_velocity=1 * u.km / u.s)
sc2 = FK4(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=1 * u.mas / u.yr
)
with pytest.raises(
TypeError,
match=(
"cannot compare: objects must have same "
"class: RadialDifferential vs. UnitSphericalCosLatDifferential"
),
):
sc1 == sc2
# Different frame attribute
sc1 = FK5(1 * u.deg, 2 * u.deg)
sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J1999")
with pytest.raises(
TypeError,
match=r"cannot compare: objects must have equivalent "
r"frames: <FK5 Frame \(equinox=J2000.000\)> "
r"vs. <FK5 Frame \(equinox=J1999.000\)>",
):
sc1 == sc2
# Different frame
sc1 = FK4(1 * u.deg, 2 * u.deg)
sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
with pytest.raises(
TypeError,
match="cannot compare: objects must have equivalent "
r"frames: <FK4 Frame \(equinox=B1950.000, obstime=B1950.000\)> "
r"vs. <FK5 Frame \(equinox=J2000.000\)>",
):
sc1 == sc2
sc1 = FK4(1 * u.deg, 2 * u.deg)
sc2 = FK4()
with pytest.raises(
ValueError, match="cannot compare: one frame has data and the other does not"
):
sc1 == sc2
with pytest.raises(
ValueError, match="cannot compare: one frame has data and the other does not"
):
sc2 == sc1
def test_dynamic_attrs():
c = ICRS(1 * u.deg, 2 * u.deg)
assert "ra" in dir(c)
assert "dec" in dir(c)
with pytest.raises(AttributeError) as err:
c.blahblah
assert "object has no attribute 'blahblah'" in str(err.value)
with pytest.raises(AttributeError) as err:
c.ra = 1
assert "Cannot set any frame attribute" in str(err.value)
c.blahblah = 1
assert c.blahblah == 1
def test_nodata_error():
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.data
assert "does not have associated data" in str(excinfo.value)
def test_len0_data():
i = ICRS([] * u.deg, [] * u.deg)
assert i.has_data
repr(i)
def test_quantity_attributes():
# make sure we can create a GCRS frame with valid inputs
GCRS(obstime="J2002", obsgeoloc=[1, 2, 3] * u.km, obsgeovel=[4, 5, 6] * u.km / u.s)
    # make sure it fails for invalid locs or vels
with pytest.raises(TypeError):
GCRS(obsgeoloc=[1, 2, 3]) # no unit
with pytest.raises(u.UnitsError):
GCRS(obsgeoloc=[1, 2, 3] * u.km / u.s) # incorrect unit
with pytest.raises(ValueError):
GCRS(obsgeoloc=[1, 3] * u.km) # incorrect shape
def test_quantity_attribute_default():
# The default default (yes) is None:
class MyCoord(BaseCoordinateFrame):
someval = QuantityAttribute(unit=u.deg)
frame = MyCoord()
assert frame.someval is None
frame = MyCoord(someval=15 * u.deg)
assert u.isclose(frame.someval, 15 * u.deg)
# This should work if we don't explicitly pass in a unit, but we pass in a
# default value with a unit
class MyCoord2(BaseCoordinateFrame):
someval = QuantityAttribute(15 * u.deg)
frame = MyCoord2()
assert u.isclose(frame.someval, 15 * u.deg)
# Since here no shape was given, we can set to any shape we like.
frame = MyCoord2(someval=np.ones(3) * u.deg)
assert frame.someval.shape == (3,)
assert np.all(frame.someval == 1 * u.deg)
# We should also be able to insist on a given shape.
class MyCoord3(BaseCoordinateFrame):
someval = QuantityAttribute(unit=u.arcsec, shape=(3,))
frame = MyCoord3(someval=np.ones(3) * u.deg)
assert frame.someval.shape == (3,)
assert frame.someval.unit == u.arcsec
assert u.allclose(frame.someval.value, 3600.0)
# The wrong shape raises.
with pytest.raises(ValueError, match="shape"):
MyCoord3(someval=1.0 * u.deg)
# As does the wrong unit.
with pytest.raises(u.UnitsError):
MyCoord3(someval=np.ones(3) * u.m)
# We are allowed a short-cut for zero.
frame0 = MyCoord3(someval=0)
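    # the bare zero is broadcast to the declared shape and unit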
assert frame0.someval.shape == (3,)
assert frame0.someval.unit == u.arcsec
assert np.all(frame0.someval.value == 0.0)
# But not if it has the wrong shape.
with pytest.raises(ValueError, match="shape"):
MyCoord3(someval=np.zeros(2))
# This should fail, if we don't pass in a default or a unit
with pytest.raises(ValueError):
class MyCoord(BaseCoordinateFrame):
someval = QuantityAttribute()
def test_eloc_attributes():
el = EarthLocation(lon=12.3 * u.deg, lat=45.6 * u.deg, height=1 * u.km)
it = ITRS(
r.SphericalRepresentation(lon=12.3 * u.deg, lat=45.6 * u.deg, distance=1 * u.km)
)
gc = GCRS(ra=12.3 * u.deg, dec=45.6 * u.deg, distance=6375 * u.km)
el1 = AltAz(location=el).location
assert isinstance(el1, EarthLocation)
    # these should match *exactly* because the location was given as an EarthLocation
assert el1.lat == el.lat
assert el1.lon == el.lon
assert el1.height == el.height
el2 = AltAz(location=it).location
assert isinstance(el2, EarthLocation)
# these should *not* match because giving something in Spherical ITRS is
# *not* the same as giving it as an EarthLocation: EarthLocation is on an
# elliptical geoid. So the longitude should match (because flattening is
# only along the z-axis), but latitude should not. Also, height is relative
# to the *surface* in EarthLocation, but the ITRS distance is relative to
# the center of the Earth
assert not allclose(el2.lat, it.spherical.lat)
assert allclose(el2.lon, it.spherical.lon)
assert el2.height < -6000 * u.km
el3 = AltAz(location=gc).location
# GCRS inputs implicitly get transformed to ITRS and then onto
# EarthLocation's elliptical geoid. So both lat and lon shouldn't match
assert isinstance(el3, EarthLocation)
assert not allclose(el3.lat, gc.dec)
assert not allclose(el3.lon, gc.ra)
assert np.abs(el3.height) < 500 * u.km
def test_equivalent_frames():
i = ICRS()
i2 = ICRS(1 * u.deg, 2 * u.deg)
assert i.is_equivalent_frame(i)
assert i.is_equivalent_frame(i2)
with pytest.raises(TypeError):
assert i.is_equivalent_frame(10)
with pytest.raises(TypeError):
assert i2.is_equivalent_frame(SkyCoord(i2))
f0 = FK5() # this J2000 is TT
f1 = FK5(equinox="J2000")
f2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
f3 = FK5(equinox="J2010")
f4 = FK4(equinox="J2010")
assert f1.is_equivalent_frame(f1)
assert not i.is_equivalent_frame(f1)
assert f0.is_equivalent_frame(f1)
assert f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f3.is_equivalent_frame(f4)
aa1 = AltAz()
aa2 = AltAz(obstime="J2010")
assert aa2.is_equivalent_frame(aa2)
assert not aa1.is_equivalent_frame(i)
assert not aa1.is_equivalent_frame(aa2)
def test_equivalent_frame_coordinateattribute():
class FrameWithCoordinateAttribute(BaseCoordinateFrame):
coord_attr = CoordinateAttribute(HCRS)
# These frames should not be considered equivalent
f0 = FrameWithCoordinateAttribute()
f1 = FrameWithCoordinateAttribute(
coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2000")
)
f2 = FrameWithCoordinateAttribute(
coord_attr=HCRS(3 * u.deg, 4 * u.deg, obstime="J2000")
)
f3 = FrameWithCoordinateAttribute(
coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2001")
)
assert not f0.is_equivalent_frame(f1)
assert not f1.is_equivalent_frame(f0)
assert not f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f2.is_equivalent_frame(f3)
# They each should still be equivalent to a deep copy of themselves
assert f0.is_equivalent_frame(deepcopy(f0))
assert f1.is_equivalent_frame(deepcopy(f1))
assert f2.is_equivalent_frame(deepcopy(f2))
assert f3.is_equivalent_frame(deepcopy(f3))
def test_equivalent_frame_locationattribute():
class FrameWithLocationAttribute(BaseCoordinateFrame):
loc_attr = EarthLocationAttribute()
# These frames should not be considered equivalent
f0 = FrameWithLocationAttribute()
location = EarthLocation(lat=-34, lon=19, height=300)
f1 = FrameWithLocationAttribute(loc_attr=location)
assert not f0.is_equivalent_frame(f1)
assert not f1.is_equivalent_frame(f0)
# They each should still be equivalent to a deep copy of themselves
assert f0.is_equivalent_frame(deepcopy(f0))
assert f1.is_equivalent_frame(deepcopy(f1))
def test_representation_subclass():
# Regression test for #3354
# Normally when instantiating a frame without a distance the frame will try
# and use UnitSphericalRepresentation internally instead of
# SphericalRepresentation.
frame = FK5(
representation_type=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg
)
    assert type(frame._data) is r.UnitSphericalRepresentation
assert frame.representation_type == r.SphericalRepresentation
# If using a SphericalRepresentation class this used to not work, so we
# test here that this is now fixed.
class NewSphericalRepresentation(r.SphericalRepresentation):
attr_classes = r.SphericalRepresentation.attr_classes
frame = FK5(
representation_type=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg
)
    assert type(frame._data) is r.UnitSphericalRepresentation
assert frame.representation_type == NewSphericalRepresentation
# A similar issue then happened in __repr__ with subclasses of
# SphericalRepresentation.
assert (
repr(frame)
== "<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n (32., 20.)>"
)
# A more subtle issue is when specifying a custom
# UnitSphericalRepresentation subclass for the data and
# SphericalRepresentation or a subclass for the representation.
class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
attr_classes = r.UnitSphericalRepresentation.attr_classes
def __repr__(self):
return "<NewUnitSphericalRepresentation: spam spam spam>"
frame = FK5(
NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
representation_type=NewSphericalRepresentation,
)
assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
c = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
c.representation_type = "cartesian"
assert c[0].representation_type is r.CartesianRepresentation
def test_component_error_useful():
"""
Check that a data-less frame gives useful error messages about not having
data when the attributes asked for are possible coordinate components
"""
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.ra
assert "does not have associated data" in str(excinfo.value)
with pytest.raises(AttributeError) as excinfo1:
i.foobar
with pytest.raises(AttributeError) as excinfo2:
i.lon # lon is *not* the component name despite being the underlying representation's name
assert "object has no attribute 'foobar'" in str(excinfo1.value)
assert "object has no attribute 'lon'" in str(excinfo2.value)
def test_cache_clear():
i = ICRS(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
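    # the cache holds the original representation plus the in-frame-units version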
assert len(i.cache["representation"]) == 2
i.cache.clear()
assert len(i.cache["representation"]) == 0
def test_inplace_array():
i = ICRS([[1, 2], [3, 4]] * u.deg, [[10, 20], [30, 40]] * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache["representation"]) == 2
# Modify the data
i.data.lon[:, 0] = [100, 200] * u.deg
# Clear the cache
i.cache.clear()
    # With the cache cleared, the components are recomputed from the modified data
assert_allclose(i.ra, [[100, 2], [200, 4]] * u.deg)
assert_allclose(i.dec, [[10, 20], [30, 40]] * u.deg)
def test_inplace_change():
i = ICRS(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache["representation"]) == 2
# Modify the data
i.data.lon[()] = 10 * u.deg
# Clear the cache
i.cache.clear()
    # With the cache cleared, the components are recomputed from the modified data
assert i.ra == 10 * u.deg
assert i.dec == 2 * u.deg
def test_representation_with_multiple_differentials():
dif1 = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
dif2 = r.CartesianDifferential([1, 2, 3] * u.km / u.s**2)
rep = r.CartesianRepresentation(
[1, 2, 3] * u.pc, differentials={"s": dif1, "s2": dif2}
)
    # a representation with multiple differentials cannot be used as frame data
with pytest.raises(ValueError):
ICRS(rep)
def test_missing_component_error_names():
"""
This test checks that the component names are frame component names, not
representation or differential names, when referenced in an exception raised
when not passing in enough data. For example:
ICRS(ra=10*u.deg)
should state:
TypeError: __init__() missing 1 required positional argument: 'dec'
"""
with pytest.raises(TypeError) as e:
ICRS(ra=150 * u.deg)
assert "missing 1 required positional argument: 'dec'" in str(e.value)
with pytest.raises(TypeError) as e:
ICRS(
ra=150 * u.deg,
dec=-11 * u.deg,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
)
assert "pm_ra_cosdec" in str(e.value)
def test_non_spherical_representation_unit_creation(unitphysics): # noqa: F811
class PhysicsICRS(ICRS):
default_representation = r.PhysicsSphericalRepresentation
pic = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg, r=1 * u.kpc)
assert isinstance(pic.data, r.PhysicsSphericalRepresentation)
picu = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg)
assert isinstance(picu.data, unitphysics)
def test_attribute_repr():
class Spam:
def _astropy_repr_in_frame(self):
return "TEST REPR"
class TestFrame(BaseCoordinateFrame):
attrtest = Attribute(default=Spam())
assert "TEST REPR" in repr(TestFrame())
def test_component_names_repr():
# Frame class with new component names that includes a name swap
class NameChangeFrame(BaseCoordinateFrame):
default_representation = r.PhysicsSphericalRepresentation
frame_specific_representation_info = {
r.PhysicsSphericalRepresentation: [
RepresentationMapping("phi", "theta", u.deg),
RepresentationMapping("theta", "phi", u.arcsec),
RepresentationMapping("r", "JUSTONCE", u.AU),
]
}
frame = NameChangeFrame(0 * u.deg, 0 * u.arcsec, 0 * u.AU)
# Check for the new names in the Frame repr
assert "(theta, phi, JUSTONCE)" in repr(frame)
# Check that the letter "r" has not been replaced more than once in the Frame repr
assert repr(frame).count("JUSTONCE") == 1
def test_galactocentric_defaults():
with galactocentric_frame_defaults.set("pre-v4.0"):
galcen_pre40 = Galactocentric()
with galactocentric_frame_defaults.set("v4.0"):
galcen_40 = Galactocentric()
with galactocentric_frame_defaults.set("latest"):
galcen_latest = Galactocentric()
# parameters that changed
assert not u.allclose(galcen_pre40.galcen_distance, galcen_40.galcen_distance)
assert not u.allclose(galcen_pre40.z_sun, galcen_40.z_sun)
for k in galcen_40.frame_attributes:
if isinstance(getattr(galcen_40, k), BaseCoordinateFrame):
continue # skip coordinate comparison...
elif isinstance(getattr(galcen_40, k), CartesianDifferential):
assert u.allclose(
getattr(galcen_40, k).d_xyz, getattr(galcen_latest, k).d_xyz
)
else:
assert getattr(galcen_40, k) == getattr(galcen_latest, k)
# test validate Galactocentric
with galactocentric_frame_defaults.set("latest"):
params = galactocentric_frame_defaults.validate(galcen_latest)
references = galcen_latest.frame_attribute_references
state = dict(parameters=params, references=references)
assert galactocentric_frame_defaults.parameters == params
assert galactocentric_frame_defaults.references == references
assert galactocentric_frame_defaults._state == state
# Test not one of accepted parameter types
with pytest.raises(ValueError):
galactocentric_frame_defaults.validate(ValueError)
# test parameters property
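    # repeated access must return equal values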
assert (
galactocentric_frame_defaults.parameters
== galactocentric_frame_defaults.parameters
)
def test_galactocentric_references():
# references in the "scientific paper"-sense
with galactocentric_frame_defaults.set("pre-v4.0"):
galcen_pre40 = Galactocentric()
for k in galcen_pre40.frame_attributes:
if k == "roll": # no reference for this parameter
continue
assert k in galcen_pre40.frame_attribute_references
with galactocentric_frame_defaults.set("v4.0"):
galcen_40 = Galactocentric()
for k in galcen_40.frame_attributes:
if k == "roll": # no reference for this parameter
continue
assert k in galcen_40.frame_attribute_references
with galactocentric_frame_defaults.set("v4.0"):
galcen_custom = Galactocentric(z_sun=15 * u.pc)
for k in galcen_custom.frame_attributes:
if k == "roll": # no reference for this parameter
continue
if k == "z_sun":
assert k not in galcen_custom.frame_attribute_references
else:
assert k in galcen_custom.frame_attribute_references
def test_coordinateattribute_transformation():
class FrameWithCoordinateAttribute(BaseCoordinateFrame):
coord_attr = CoordinateAttribute(HCRS)
hcrs = HCRS(1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-02-03")
f1_frame = FrameWithCoordinateAttribute(coord_attr=hcrs)
f1_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(hcrs))
# The input is already HCRS, so the frame attribute should not change it
assert f1_frame.coord_attr == hcrs
# The output should not be different if a SkyCoord is provided
assert f1_skycoord.coord_attr == f1_frame.coord_attr
gcrs = GCRS(4 * u.deg, 5 * u.deg, 6 * u.AU, obstime="2004-05-06")
f2_frame = FrameWithCoordinateAttribute(coord_attr=gcrs)
f2_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(gcrs))
# The input needs to be converted from GCRS to HCRS
assert isinstance(f2_frame.coord_attr, HCRS)
# The `obstime` frame attribute should have been "merged" in a SkyCoord-style transformation
assert f2_frame.coord_attr.obstime == gcrs.obstime
# The output should not be different if a SkyCoord is provided
assert f2_skycoord.coord_attr == f2_frame.coord_attr
def test_realize_frame_accepts_kwargs():
c1 = ICRS(
x=1 * u.pc,
y=2 * u.pc,
z=3 * u.pc,
representation_type=r.CartesianRepresentation,
)
new_data = r.CartesianRepresentation(x=11 * u.pc, y=12 * u.pc, z=13 * u.pc)
c2 = c1.realize_frame(new_data, representation_type="cartesian")
c3 = c1.realize_frame(new_data, representation_type="cylindrical")
assert c2.representation_type == r.CartesianRepresentation
assert c3.representation_type == r.CylindricalRepresentation
def test_nameless_frame_subclass():
"""Note: this is a regression test for #11096"""
class Test:
pass
# Subclass from a frame class and a non-frame class.
# This subclassing is the test!
class NewFrame(ICRS, Test):
pass
def test_frame_coord_comparison():
"""Test that frame can be compared to a SkyCoord"""
frame = ICRS(0 * u.deg, 0 * u.deg)
coord = SkyCoord(frame)
other = SkyCoord(ICRS(0 * u.deg, 1 * u.deg))
assert frame == coord
assert frame != other
assert not (frame == other)
error_msg = "objects must have equivalent frames"
with pytest.raises(TypeError, match=error_msg):
frame == SkyCoord(AltAz("0d", "1d"))
coord = SkyCoord(ra=12 * u.hourangle, dec=5 * u.deg, frame=FK5(equinox="J1950"))
frame = FK5(ra=12 * u.hourangle, dec=5 * u.deg, equinox="J2000")
with pytest.raises(TypeError, match=error_msg):
coord == frame
frame = ICRS()
coord = SkyCoord(0 * u.deg, 0 * u.deg, frame=frame)
error_msg = "Can only compare SkyCoord to Frame with data"
with pytest.raises(ValueError, match=error_msg):
frame == coord
"""
This file tests the behavior of subclasses of Representation and Frames
"""
from copy import deepcopy
import astropy.coordinates
import astropy.units as u
from astropy.coordinates import ICRS, Latitude, Longitude
from astropy.coordinates.baseframe import RepresentationMapping, frame_transform_graph
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES,
SphericalRepresentation,
UnitSphericalRepresentation,
_invalidate_reprdiff_cls_hash,
)
from astropy.coordinates.transformations import FunctionTransform
# Classes setup, borrowed from SunPy.
# Here we define the classes *inside* the tests to make sure that we can wipe
# the slate clean when the tests have finished running.
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
_invalidate_reprdiff_cls_hash()
def test_unit_representation_subclass():
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(
cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs
)
return self
class UnitSphericalWrap180Representation(UnitSphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude}
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude, "distance": u.Quantity}
_unit_representation = UnitSphericalWrap180Representation
class MyFrame(ICRS):
default_representation = SphericalWrap180Representation
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ra"),
RepresentationMapping("lat", "dec"),
]
}
frame_specific_representation_info[
"unitsphericalwrap180"
] = frame_specific_representation_info[
"sphericalwrap180"
] = frame_specific_representation_info[
"spherical"
]
@frame_transform_graph.transform(
FunctionTransform, MyFrame, astropy.coordinates.ICRS
)
def myframe_to_icrs(myframe_coo, icrs):
return icrs.realize_frame(myframe_coo._data)
f = MyFrame(10 * u.deg, 10 * u.deg)
assert isinstance(f._data, UnitSphericalWrap180Representation)
assert isinstance(f.ra, Longitude180)
g = f.transform_to(astropy.coordinates.ICRS())
assert isinstance(g, astropy.coordinates.ICRS)
assert isinstance(g._data, UnitSphericalWrap180Representation)
frame_transform_graph.remove_transform(MyFrame, astropy.coordinates.ICRS, None)
import os
from urllib.error import HTTPError, URLError
import numpy as np
import pytest
from astropy import units as u
from astropy.constants import c
from astropy.coordinates.builtin_frames import TETE
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.funcs import get_sun
from astropy.coordinates.representation import (
CartesianRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.coordinates.solar_system import (
BODY_NAME_TO_KERNEL_SPEC,
_get_apparent_body_position,
get_body,
get_body_barycentric,
get_body_barycentric_posvel,
get_moon,
solar_system_ephemeris,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM, HAS_SKYFIELD
from astropy.utils.data import download_file, get_pkg_data_filename
if HAS_SKYFIELD:
from skyfield.api import Loader, Topos
de432s_separation_tolerance_planets = 5 * u.arcsec
de432s_separation_tolerance_moon = 5 * u.arcsec
de432s_distance_tolerance = 20 * u.km
skyfield_angular_separation_tolerance = 1 * u.arcsec
skyfield_separation_tolerance = 10 * u.km
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_SKYFIELD, reason="requires skyfield")
def test_positions_skyfield(tmp_path):
"""
Test positions against those generated by skyfield.
"""
load = Loader(tmp_path)
t = Time("1980-03-25 00:00")
location = None
# skyfield ephemeris
try:
planets = load("de421.bsp")
ts = load.timescale()
except OSError as e:
if os.environ.get("CI", False) and "timed out" in str(e):
pytest.xfail("Timed out in CI")
else:
raise
mercury, jupiter, moon = (
planets["mercury"],
planets["jupiter barycenter"],
planets["moon"],
)
earth = planets["earth"]
skyfield_t = ts.from_astropy(t)
if location is not None:
earth = earth + Topos(
latitude_degrees=location.lat.to_value(u.deg),
longitude_degrees=location.lon.to_value(u.deg),
elevation_m=location.height.to_value(u.m),
)
skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent()
skyfield_jupiter = earth.at(skyfield_t).observe(jupiter).apparent()
skyfield_moon = earth.at(skyfield_t).observe(moon).apparent()
if location is not None:
frame = TETE(obstime=t, location=location)
else:
frame = TETE(obstime=t)
ra, dec, dist = skyfield_mercury.radec(epoch="date")
skyfield_mercury = SkyCoord(
ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame
)
ra, dec, dist = skyfield_jupiter.radec(epoch="date")
skyfield_jupiter = SkyCoord(
ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame
)
ra, dec, dist = skyfield_moon.radec(epoch="date")
skyfield_moon = SkyCoord(
ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame
)
# planet positions w.r.t true equator and equinox
moon_astropy = get_moon(t, location, ephemeris="de430").transform_to(frame)
mercury_astropy = get_body("mercury", t, location, ephemeris="de430").transform_to(
frame
)
jupiter_astropy = get_body("jupiter", t, location, ephemeris="de430").transform_to(
frame
)
assert (
moon_astropy.separation(skyfield_moon) < skyfield_angular_separation_tolerance
)
assert moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance
assert (
jupiter_astropy.separation(skyfield_jupiter)
< skyfield_angular_separation_tolerance
)
assert (
jupiter_astropy.separation_3d(skyfield_jupiter) < skyfield_separation_tolerance
)
assert (
mercury_astropy.separation(skyfield_mercury)
< skyfield_angular_separation_tolerance
)
assert (
mercury_astropy.separation_3d(skyfield_mercury) < skyfield_separation_tolerance
)
planets.close()
class TestPositionsGeocentric:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup_method(self):
self.t = Time("1980-03-25 00:00")
self.apparent_frame = TETE(obstime=self.t)
# Results returned by JPL Horizons web interface
self.horizons = {
"mercury": SkyCoord(
ra="22h41m47.78s",
dec="-08d29m32.0s",
distance=c * 6.323037 * u.min,
frame=self.apparent_frame,
),
"moon": SkyCoord(
ra="07h32m02.62s",
dec="+18d34m05.0s",
distance=c * 0.021921 * u.min,
frame=self.apparent_frame,
),
"jupiter": SkyCoord(
ra="10h17m12.82s",
dec="+12d02m57.0s",
distance=c * 37.694557 * u.min,
frame=self.apparent_frame,
),
"sun": SkyCoord(
ra="00h16m31.00s",
dec="+01d47m16.9s",
distance=c * 8.294858 * u.min,
frame=self.apparent_frame,
),
}
@pytest.mark.parametrize(
("body", "sep_tol", "dist_tol"),
(
("mercury", 7.0 * u.arcsec, 1000 * u.km),
("jupiter", 78.0 * u.arcsec, 76000 * u.km),
("moon", 20.0 * u.arcsec, 80 * u.km),
("sun", 5.0 * u.arcsec, 11.0 * u.km),
),
)
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and
Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon.
"""
astropy = get_body(body, self.t, ephemeris="builtin")
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance, atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize("body", ("mercury", "jupiter", "sun"))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris="de432s")
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < de432s_separation_tolerance_planets
# Assert distances are close.
assert_quantity_allclose(
astropy.distance, horizons.distance, atol=de432s_distance_tolerance
)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris="de432s")
horizons = self.horizons["moon"]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < de432s_separation_tolerance_moon
# Assert distances are close.
assert_quantity_allclose(
astropy.distance, horizons.distance, atol=de432s_distance_tolerance
)
class TestPositionKittPeak:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup_method(self):
kitt_peak = EarthLocation.from_geodetic(
lon=-111.6 * u.deg, lat=31.963333333333342 * u.deg, height=2120 * u.m
)
self.t = Time("2014-09-25T00:00", location=kitt_peak)
self.apparent_frame = TETE(obstime=self.t, location=kitt_peak)
# Results returned by JPL Horizons web interface
self.horizons = {
"mercury": SkyCoord(
ra="13h38m58.50s",
dec="-13d34m42.6s",
distance=c * 7.699020 * u.min,
frame=self.apparent_frame,
),
"moon": SkyCoord(
ra="12h33m12.85s",
dec="-05d17m54.4s",
distance=c * 0.022054 * u.min,
frame=self.apparent_frame,
),
"jupiter": SkyCoord(
ra="09h09m55.55s",
dec="+16d51m57.8s",
distance=c * 49.244937 * u.min,
frame=self.apparent_frame,
),
}
@pytest.mark.parametrize(
("body", "sep_tol", "dist_tol"),
(
("mercury", 7.0 * u.arcsec, 500 * u.km),
("jupiter", 78.0 * u.arcsec, 82000 * u.km),
),
)
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c.
"""
# Add uncertainty in position of Earth
dist_tol = dist_tol + 1300 * u.km
astropy = get_body(body, self.t, ephemeris="builtin")
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance, atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize("body", ("mercury", "jupiter"))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris="de432s")
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < de432s_separation_tolerance_planets
# Assert distances are close.
assert_quantity_allclose(
astropy.distance, horizons.distance, atol=de432s_distance_tolerance
)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris="de432s")
horizons = self.horizons["moon"]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < de432s_separation_tolerance_moon
# Assert distances are close.
assert_quantity_allclose(
astropy.distance, horizons.distance, atol=de432s_distance_tolerance
)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize("bodyname", ("mercury", "jupiter"))
def test_custom_kernel_spec_body(self, bodyname):
"""
Checks that giving a kernel specifier instead of a body name works
"""
coord_by_name = get_body(bodyname, self.t, ephemeris="de432s")
kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname]
coord_by_kspec = get_body(kspec, self.t, ephemeris="de432s")
assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra)
assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec)
assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_horizons_consistency_with_precision():
"""
A test to compare at high precision against output of JPL horizons.
Tests ephemerides, and conversions from ICRS to GCRS to TETE. We are aiming for
better than 2 milli-arcsecond precision.
We use the Moon since it is nearby, and moves fast in the sky so we are
testing for parallax, proper handling of light deflection and aberration.
"""
# JPL Horizon values for 2020_04_06 00:00 to 23:00 in 1 hour steps
# JPL Horizons has a known offset (frame bias) of 51.02 mas in RA. We correct that here
ra_apparent_horizons = [
170.167332531,
170.560688674,
170.923834838,
171.271663481,
171.620188972,
171.985340827,
172.381766539,
172.821772139,
173.314502650,
173.865422398,
174.476108551,
175.144332386,
175.864375310,
176.627519827,
177.422655853,
178.236955730,
179.056584831,
179.867427392,
180.655815385,
181.409252074,
182.117113814,
182.771311578,
183.366872837,
183.902395443,
] * u.deg + 51.02376467 * u.mas
dec_apparent_horizons = [
10.269112037,
10.058820647,
9.837152044,
9.603724551,
9.358956528,
9.104012390,
8.840674927,
8.571162442,
8.297917326,
8.023394488,
7.749873882,
7.479312991,
7.213246666,
6.952732614,
6.698336823,
6.450150213,
6.207828142,
5.970645962,
5.737565957,
5.507313851,
5.278462034,
5.049521497,
4.819038911,
4.585696512,
] * u.deg
with solar_system_ephemeris.set("de430"):
loc = EarthLocation.from_geodetic(
-67.787260 * u.deg, -22.959748 * u.deg, 5186 * u.m
)
times = Time("2020-04-06 00:00") + np.arange(0, 24, 1) * u.hour
astropy = get_body("moon", times, loc)
apparent_frame = TETE(obstime=times, location=loc)
astropy = astropy.transform_to(apparent_frame)
usrepr = UnitSphericalRepresentation(
ra_apparent_horizons, dec_apparent_horizons
)
horizons = apparent_frame.realize_frame(usrepr)
assert_quantity_allclose(astropy.separation(horizons), 0 * u.mas, atol=1.5 * u.mas)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
"time",
(Time("1960-01-12 00:00"), Time("1980-03-25 00:00"), Time("2010-10-13 00:00")),
)
def test_get_sun_consistency(time):
"""
Test that the sun from JPL and the builtin get_sun match
"""
sun_jpl_gcrs = get_body("sun", time, ephemeris="de432s")
builtin_get_sun = get_sun(time)
sep = builtin_get_sun.separation(sun_jpl_gcrs)
assert sep < 0.1 * u.arcsec
def test_get_moon_nonscalar_regression():
"""
Test that the builtin ephemeris works with non-scalar times.
See Issue #5069.
"""
times = Time(["2015-08-28 03:30", "2015-09-05 10:30"])
# the following line will raise an Exception if the bug recurs.
get_moon(times, ephemeris="builtin")
def test_barycentric_pos_posvel_same():
# Check that the two routines give identical results.
ep1 = get_body_barycentric("earth", Time("2016-03-20T12:30:00"))
ep2, _ = get_body_barycentric_posvel("earth", Time("2016-03-20T12:30:00"))
assert np.all(ep1.xyz == ep2.xyz)
def test_earth_barycentric_velocity_rough():
# Check that a time near the equinox gives roughly the right result.
ep, ev = get_body_barycentric_posvel("earth", Time("2016-03-20T12:30:00"))
assert_quantity_allclose(ep.xyz, [-1.0, 0.0, 0.0] * u.AU, atol=0.01 * u.AU)
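    # ~30 km/s orbital speed, projected using the ~23.5 deg obliquity of the ecliptic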
expected = (
u.Quantity([0.0 * u.one, np.cos(23.5 * u.deg), np.sin(23.5 * u.deg)])
* -30.0
* u.km
/ u.s
)
assert_quantity_allclose(ev.xyz, expected, atol=1.0 * u.km / u.s)
def test_earth_barycentric_velocity_multi_d():
# Might as well test it with a multidimensional array too.
t = Time("2016-03-20T12:30:00") + np.arange(8.0).reshape(2, 2, 2) * u.yr / 2.0
ep, ev = get_body_barycentric_posvel("earth", t)
# note: assert_quantity_allclose doesn't like the shape mismatch.
# this is a problem with np.testing.assert_allclose.
assert quantity_allclose(
ep.get_xyz(xyz_axis=-1),
[[-1.0, 0.0, 0.0], [+1.0, 0.0, 0.0]] * u.AU,
atol=0.06 * u.AU,
)
expected = u.Quantity([0.0 * u.one, np.cos(23.5 * u.deg), np.sin(23.5 * u.deg)]) * (
[[-30.0], [30.0]] * u.km / u.s
)
assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected, atol=2.0 * u.km / u.s)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
("body", "pos_tol", "vel_tol"),
(
("mercury", 1000.0 * u.km, 1.0 * u.km / u.s),
("jupiter", 100000.0 * u.km, 2.0 * u.km / u.s),
("earth", 10 * u.km, 10 * u.mm / u.s),
("moon", 18 * u.km, 50 * u.mm / u.s),
),
)
def test_barycentric_velocity_consistency(body, pos_tol, vel_tol):
# Tolerances are about 1.5 times the rms listed for plan94 and epv00,
# except for Mercury (which nominally is 334 km rms), and the Moon
# (which nominally is 6 km rms).
t = Time("2016-03-20T12:30:00")
ep, ev = get_body_barycentric_posvel(body, t, ephemeris="builtin")
dp, dv = get_body_barycentric_posvel(body, t, ephemeris="de432s")
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
# Might as well test it with a multidimensional array too.
t = Time("2016-03-20T12:30:00") + np.arange(8.0).reshape(2, 2, 2) * u.yr / 2.0
ep, ev = get_body_barycentric_posvel(body, t, ephemeris="builtin")
dp, dv = get_body_barycentric_posvel(body, t, ephemeris="de432s")
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
"time",
(Time("1960-01-12 00:00"), Time("1980-03-25 00:00"), Time("2010-10-13 00:00")),
)
def test_url_or_file_ephemeris(time):
# URL for ephemeris de432s used for testing:
url = "http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp"
# Pass the ephemeris directly as a URL.
coord_by_url = get_body("earth", time, ephemeris=url)
# Translate the URL to the cached location on the filesystem.
# Since we just used the url above, it should already have been downloaded.
filepath = download_file(url, cache=True)
# Get the coordinates using the file path directly:
coord_by_filepath = get_body("earth", time, ephemeris=filepath)
# Using the URL or filepath should give exactly the same results:
assert_quantity_allclose(coord_by_url.ra, coord_by_filepath.ra)
assert_quantity_allclose(coord_by_url.dec, coord_by_filepath.dec)
assert_quantity_allclose(coord_by_url.distance, coord_by_filepath.distance)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_url_ephemeris_wrong_input():
time = Time("1960-01-12 00:00")
with pytest.raises((HTTPError, URLError)):
# A non-existent URL
get_body(
"earth",
time,
ephemeris=get_pkg_data_filename("path/to/nonexisting/file.bsp"),
)
with pytest.raises(HTTPError):
# A non-existent version of the JPL ephemeris
get_body("earth", time, ephemeris="de001")
with pytest.raises(ValueError):
# An invalid string
get_body("earth", time, ephemeris="not_an_ephemeris")
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_file_ephemeris_wrong_input():
time = Time("1960-01-12 00:00")
# Try loading a non-existing file:
with pytest.raises(ValueError):
get_body("earth", time, ephemeris="/path/to/nonexisting/file.bsp")
    # NOTE: This test currently leaves the file open (ResourceWarning);
    # closing it properly requires an upstream fix in the jplephem package.
# Try loading a file that does exist, but is not an ephemeris file:
with pytest.warns(ResourceWarning), pytest.raises(ValueError):
get_body("earth", time, ephemeris=__file__)
def test_regression_10271():
t = Time(58973.534052125986, format="mjd")
# GCRS position of ALMA at this time
obs_p = CartesianRepresentation(
5724535.74068625, -1311071.58985697, -2492738.93017009, u.m
)
geocentre = CartesianRepresentation(0, 0, 0, u.m)
icrs_sun_from_alma = _get_apparent_body_position("sun", t, "builtin", obs_p)
icrs_sun_from_geocentre = _get_apparent_body_position(
"sun", t, "builtin", geocentre
)
difference = (icrs_sun_from_alma - icrs_sun_from_geocentre).norm()
assert_quantity_allclose(difference, 0.13046941 * u.m, atol=1 * u.mm)
|
41695d2931eed557df1e4ffd8778e3a2ef6c2481088f8a593dc1d55412744851 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates import transformations as t
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.builtin_frames import (
FK4,
FK5,
HCRS,
ICRS,
AltAz,
FK4NoETerms,
Galactic,
)
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils.exceptions import AstropyWarning
# Coordinates just for these tests.
class TCoo1(ICRS):
pass
class TCoo2(ICRS):
pass
class TCoo3(ICRS):
pass
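# These ICRS subclasses behave like ICRS (same components) but have no
# pre-registered transforms between them, so each test can register and
# unregister its own.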
def test_transform_classes():
"""
Tests the class-based/OO syntax for creating transforms
"""
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
_ = t.FunctionTransform(tfun, TCoo1, TCoo2, register_graph=frame_transform_graph)
c1 = TCoo1(ra=1 * u.radian, dec=0.5 * u.radian)
c2 = c1.transform_to(TCoo2())
assert_allclose(c2.ra.radian, 1)
assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
return [[1, 0, 0], [0, coo.ra.degree, 0], [0, 0, 1]]
trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
trans2.register(frame_transform_graph)
c3 = TCoo1(ra=1 * u.deg, dec=2 * u.deg)
c4 = c3.transform_to(TCoo2())
assert_allclose(c4.ra.degree, 1)
    assert_allclose(c4.dec.degree, 2)  # matrix is the identity for ra=1 deg
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
trans2.unregister(frame_transform_graph)
def test_transform_decos():
"""
Tests the decorator syntax for creating transforms
"""
c1 = TCoo1(ra=1 * u.deg, dec=2 * u.deg)
@frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
def trans(coo1, f):
return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TCoo2())
assert_allclose(c2.ra.degree, 1)
assert_allclose(c2.dec.degree, 4)
c3 = TCoo1(r.CartesianRepresentation(x=1 * u.pc, y=1 * u.pc, z=2 * u.pc))
@frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
def matrix():
return [[2, 0, 0], [0, 1, 0], [0, 0, 1]]
c4 = c3.transform_to(TCoo2())
assert_allclose(c4.cartesian.x, 2 * u.pc)
assert_allclose(c4.cartesian.y, 1 * u.pc)
assert_allclose(c4.cartesian.z, 2 * u.pc)
def test_shortest_path():
class FakeTransform:
def __init__(self, pri):
self.priority = pri
g = t.TransformGraph()
# cheating by adding graph elements directly that are not classes - the
# graphing algorithm still works fine with integers - it just isn't a valid
# TransformGraph
    # the graph is a down-going diamond with the lower-right edge slightly
    # heavier and a cycle from the bottom back to the top
# also, a pair of nodes isolated from 1
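    # Sketch (edge priorities in parentheses):
    #     1 --(1)--> 2,  1 --(1)--> 3
    #     2 --(1)--> 4,  3 --(2)--> 4
    #     4 --(5)--> 1   (the cycle)
    #     5 --(1)--> 6   (isolated pair)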
g._graph[1][2] = FakeTransform(1)
g._graph[1][3] = FakeTransform(1)
g._graph[2][4] = FakeTransform(1)
g._graph[3][4] = FakeTransform(2)
g._graph[4][1] = FakeTransform(5)
g._graph[5][6] = FakeTransform(1)
path, d = g.find_shortest_path(1, 2)
assert path == [1, 2]
assert d == 1
path, d = g.find_shortest_path(1, 3)
assert path == [1, 3]
assert d == 1
path, d = g.find_shortest_path(1, 4)
print("Cached paths:", g._shortestpaths)
assert path == [1, 2, 4]
assert d == 2
# unreachable
path, d = g.find_shortest_path(1, 5)
assert path is None
assert d == float("inf")
path, d = g.find_shortest_path(5, 6)
assert path == [5, 6]
assert d == 1
def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
from astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian
from astropy.utils import NumpyRNGContext
x, y, z = spherical_to_cartesian(1, 0, 0)
assert_allclose(x, 1)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
assert_allclose(x, 0)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4.0 / 5.0))
assert_allclose(x, 3)
assert_allclose(y, 4)
assert_allclose(z, 0)
r, lat, lon = cartesian_to_spherical(0, 1, 0)
assert_allclose(r, 1)
assert_allclose(lat, 0 * u.deg)
assert_allclose(lon, np.pi / 2 * u.rad)
# test round-tripping
with NumpyRNGContext(13579):
x, y, z = np.random.randn(3, 5)
r, lat, lon = cartesian_to_spherical(x, y, z)
x2, y2, z2 = spherical_to_cartesian(r, lat, lon)
assert_allclose(x, x2)
assert_allclose(y, y2)
assert_allclose(z, z2)
def test_transform_path_pri():
"""
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4.
"""
frame_transform_graph.invalidate_cache()
tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
assert tpath == [ICRS, FK5, Galactic]
assert td == 2
# but direct from FK4 to Galactic should still be possible
tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
assert tpath == [FK4, FK4NoETerms, Galactic]
assert td == 2
def test_obstime():
"""
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations
"""
b1950 = Time("B1950")
j1975 = Time("J1975")
fk4_50 = FK4(ra=1 * u.deg, dec=2 * u.deg, obstime=b1950)
fk4_75 = FK4(ra=1 * u.deg, dec=2 * u.deg, obstime=j1975)
icrs_50 = fk4_50.transform_to(ICRS())
icrs_75 = fk4_75.transform_to(ICRS())
# now check that the resulting coordinates are *different* - they should be,
# because the obstime is different
assert icrs_50.ra.degree != icrs_75.ra.degree
assert icrs_50.dec.degree != icrs_75.dec.degree
# ------------------------------------------------------------------------------
# Affine transform tests and helpers:
# just acting as a namespace
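# Each classmethod below returns the ``(matrix, offset)`` pair expected of an
# AffineTransform function: a 3x3 matrix (or None) plus a representation
# offset, optionally carrying a velocity differential (or None).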
class transfunc:
rep = r.CartesianRepresentation(np.arange(3) * u.pc)
dif = r.CartesianDifferential(*np.arange(3, 6) * u.pc / u.Myr)
rep0 = r.CartesianRepresentation(np.zeros(3) * u.pc)
@classmethod
def both(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
return M, cls.rep.with_differentials(cls.dif)
@classmethod
def just_matrix(cls, coo, fr):
        # exchange x <-> z, no offset
M = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
return M, None
@classmethod
def no_matrix(cls, coo, fr):
return None, cls.rep.with_differentials(cls.dif)
@classmethod
def no_pos(cls, coo, fr):
return None, cls.rep0.with_differentials(cls.dif)
@classmethod
def no_vel(cls, coo, fr):
return None, cls.rep
@pytest.mark.parametrize(
"transfunc",
[
transfunc.both,
transfunc.no_matrix,
transfunc.no_pos,
transfunc.no_vel,
transfunc.just_matrix,
],
)
# fmt: off
@pytest.mark.parametrize('rep', [
r.CartesianRepresentation(5, 6, 7, unit=u.pc),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr)),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr))
.represent_as(r.CylindricalRepresentation, r.CylindricalDifferential)
])
# fmt: on
def test_affine_transform_succeed(transfunc, rep):
c = TCoo1(rep)
# compute expected output
M, offset = transfunc(c, TCoo2)
_rep = rep.to_cartesian()
diffs = {
k: diff.represent_as(r.CartesianDifferential, rep)
for k, diff in rep.differentials.items()
}
expected_rep = _rep.with_differentials(diffs)
if M is not None:
expected_rep = expected_rep.transform(M)
expected_pos = expected_rep.without_differentials()
if offset is not None:
expected_pos = expected_pos + offset.without_differentials()
expected_vel = None
if c.data.differentials:
expected_vel = expected_rep.differentials["s"]
if offset and offset.differentials:
expected_vel = expected_vel + offset.differentials["s"]
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2())
assert quantity_allclose(
c2.data.to_cartesian().xyz, expected_pos.to_cartesian().xyz
)
if expected_vel is not None:
diff = c2.data.differentials["s"].to_cartesian(base=c2.data)
assert quantity_allclose(diff.xyz, expected_vel.d_xyz)
trans.unregister(frame_transform_graph)
# these should fail
def transfunc_invalid_matrix(coo, fr):
return np.eye(4), None
# Leaving this open in case we want to add more functions to check for failures
@pytest.mark.parametrize("transfunc", [transfunc_invalid_matrix])
def test_affine_transform_fail(transfunc):
diff = r.CartesianDifferential(8, 9, 10, unit=u.pc / u.Myr)
rep = r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=diff)
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(ValueError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
def test_too_many_differentials():
dif1 = r.CartesianDifferential(*np.arange(3, 6) * u.pc / u.Myr)
dif2 = r.CartesianDifferential(*np.arange(3, 6) * u.pc / u.Myr**2)
rep = r.CartesianRepresentation(
np.arange(3) * u.pc, differentials={"s": dif1, "s2": dif2}
)
with pytest.raises(ValueError):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
    # Check that if a frame somehow gets through to the transformation with
    # multiple differentials attached, they are caught
c = TCoo1(rep.without_differentials())
c._data = c._data.with_differentials({"s": dif1, "s2": dif2})
with pytest.raises(ValueError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
# A matrix transform of a unit spherical with differentials should work
# fmt: off
@pytest.mark.parametrize('rep', [
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials=r.SphericalDifferential(d_lon=15*u.mas/u.yr,
d_lat=11*u.mas/u.yr,
d_distance=-110*u.km/u.s)),
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}),
r.SphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
distance=150*u.pc,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)})
])
# fmt: on
def test_unit_spherical_with_differentials(rep):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.just_matrix, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2())
assert "s" in rep.differentials
assert isinstance(c2.data.differentials["s"], rep.differentials["s"].__class__)
if isinstance(rep.differentials["s"], r.RadialDifferential):
assert c2.data.differentials["s"] is rep.differentials["s"]
trans.unregister(frame_transform_graph)
# should fail if we have to do offsets
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(TypeError):
c.transform_to(TCoo2())
trans.unregister(frame_transform_graph)
def test_vel_transformation_obstime_err():
# TODO: replace after a final decision on PR #6280
from astropy.coordinates.sites import get_builtin_sites
diff = r.CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
rep = r.CartesianRepresentation([1, 2, 3] * u.au, differentials=diff)
loc = get_builtin_sites()["example_site"]
aaf = AltAz(obstime="J2010", location=loc)
aaf2 = AltAz(obstime=aaf.obstime + 3 * u.day, location=loc)
aaf3 = AltAz(obstime=aaf.obstime + np.arange(3) * u.day, location=loc)
aaf4 = AltAz(obstime=aaf.obstime, location=loc)
aa = aaf.realize_frame(rep)
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf2)
assert "cannot transform" in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf3)
assert "cannot transform" in exc.value.args[0]
aa.transform_to(aaf4)
aa.transform_to(ICRS())
def test_function_transform_with_differentials():
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
_ = t.FunctionTransform(tfun, TCoo3, TCoo2, register_graph=frame_transform_graph)
t3 = TCoo3(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=1 * u.marcsec / u.yr,
pm_dec=1 * u.marcsec / u.yr,
)
with pytest.warns(AstropyWarning, match=r".*they have been dropped.*") as w:
t3.transform_to(TCoo2())
assert len(w) == 1
def test_frame_override_component_with_attribute():
"""
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this!
"""
from astropy.coordinates.attributes import Attribute
from astropy.coordinates.baseframe import BaseCoordinateFrame
class BorkedFrame(BaseCoordinateFrame):
ra = Attribute(default=150)
dec = Attribute(default=150)
def trans_func(coo1, f):
pass
trans = t.FunctionTransform(trans_func, BorkedFrame, ICRS)
with pytest.raises(ValueError) as exc:
trans.register(frame_transform_graph)
assert (
"BorkedFrame" in exc.value.args[0]
and "'ra'" in exc.value.args[0]
and "'dec'" in exc.value.args[0]
)
def test_static_matrix_combine_paths():
"""
    Check that combined StaticMatrixTransform matrices provide the same
    transformation as using an intermediate transformation.
    This is somewhat of a regression test for #7706.
"""
from astropy.coordinates.baseframe import BaseCoordinateFrame
from astropy.coordinates.matrix_utilities import rotation_matrix
class AFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t1 = t.StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "z"), ICRS, AFrame)
t1.register(frame_transform_graph)
t2 = t.StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "z").T, AFrame, ICRS)
t2.register(frame_transform_graph)
class BFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t3 = t.StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "x"), ICRS, BFrame)
t3.register(frame_transform_graph)
t4 = t.StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "x").T, BFrame, ICRS)
t4.register(frame_transform_graph)
c = Galactic(123 * u.deg, 45 * u.deg)
c1 = c.transform_to(BFrame()) # direct
c2 = c.transform_to(AFrame()).transform_to(BFrame()) # thru A
c3 = c.transform_to(ICRS()).transform_to(BFrame()) # thru ICRS
assert quantity_allclose(c1.lon, c2.lon)
assert quantity_allclose(c1.lat, c2.lat)
assert quantity_allclose(c1.lon, c3.lon)
assert quantity_allclose(c1.lat, c3.lat)
for t_ in [t1, t2, t3, t4]:
t_.unregister(frame_transform_graph)
def test_multiple_aliases():
from astropy.coordinates.baseframe import BaseCoordinateFrame
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ["alias_1", "alias_2"]
default_representation = r.SphericalRepresentation
def tfun(c, f):
return f.__class__(lon=c.lon, lat=c.lat)
# Register a transform
graph = t.TransformGraph()
_ = t.FunctionTransform(
tfun, MultipleAliasesFrame, MultipleAliasesFrame, register_graph=graph
)
# Test that both aliases have been added to the transform graph
assert graph.lookup_name("alias_1") == MultipleAliasesFrame
assert graph.lookup_name("alias_2") == MultipleAliasesFrame
# Test that both aliases appear in the graphviz DOT format output
dotstr = graph.to_dot_graph()
assert "`alias_1`\\n`alias_2`" in dotstr
def test_remove_transform_and_unregister():
def tfun(c, f):
        return f.__class__(ra=c.ra, dec=c.dec)
# Register transforms
graph = t.TransformGraph()
ftrans1 = t.FunctionTransform(tfun, TCoo1, TCoo1, register_graph=graph)
ftrans2 = t.FunctionTransform(tfun, TCoo2, TCoo2, register_graph=graph)
_ = t.FunctionTransform(tfun, TCoo1, TCoo2, register_graph=graph)
# Confirm that the frames are part of the graph
assert TCoo1 in graph.frame_set
assert TCoo2 in graph.frame_set
# Use all three ways to remove a transform
# Remove the only transform with TCoo2 as the "from" frame
ftrans2.unregister(graph)
# TCoo2 should still be part of the graph because it is the "to" frame of a transform
assert TCoo2 in graph.frame_set
# Remove the remaining transform that involves TCoo2
graph.remove_transform(TCoo1, TCoo2, None)
# Now TCoo2 should not be part of the graph
assert TCoo2 not in graph.frame_set
# Remove the remaining transform that involves TCoo1
graph.remove_transform(None, None, ftrans1)
# Now TCoo1 should not be part of the graph
assert TCoo1 not in graph.frame_set
def test_remove_transform_errors():
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
graph = t.TransformGraph()
_ = t.FunctionTransform(tfun, TCoo1, TCoo1, register_graph=graph)
# Test bad calls to remove_transform
with pytest.raises(ValueError):
graph.remove_transform(None, TCoo1, None)
with pytest.raises(ValueError):
graph.remove_transform(TCoo1, None, None)
with pytest.raises(ValueError):
graph.remove_transform(None, None, None)
with pytest.raises(ValueError):
graph.remove_transform(None, None, 1)
with pytest.raises(ValueError):
graph.remove_transform(TCoo1, TCoo1, 1)
def test_impose_finite_difference_dt():
class H1(HCRS):
pass
class H2(HCRS):
pass
class H3(HCRS):
pass
graph = t.TransformGraph()
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
# Set up a number of transforms with different time steps
old_dt = 1 * u.min
transform1 = t.FunctionTransformWithFiniteDifference(
tfun, H1, H1, register_graph=graph, finite_difference_dt=old_dt
)
transform2 = t.FunctionTransformWithFiniteDifference(
tfun, H2, H2, register_graph=graph, finite_difference_dt=old_dt * 2
)
transform3 = t.FunctionTransformWithFiniteDifference(
tfun, H2, H3, register_graph=graph, finite_difference_dt=old_dt * 3
)
# Check that all of the transforms have the same new time step
new_dt = 1 * u.yr
with graph.impose_finite_difference_dt(new_dt):
assert transform1.finite_difference_dt == new_dt
assert transform2.finite_difference_dt == new_dt
assert transform3.finite_difference_dt == new_dt
# Check that all of the original time steps have been restored
assert transform1.finite_difference_dt == old_dt
assert transform2.finite_difference_dt == old_dt * 2
assert transform3.finite_difference_dt == old_dt * 3
# fmt: off
@pytest.mark.parametrize("first, second, check",
[((rotation_matrix(30*u.deg), None),
(rotation_matrix(45*u.deg), None),
(rotation_matrix(75*u.deg), None)),
((rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 0, 0])),
(rotation_matrix(45*u.deg), None),
(rotation_matrix(75*u.deg), r.CartesianRepresentation([1/np.sqrt(2), -1/np.sqrt(2), 0]))),
((rotation_matrix(30*u.deg), None),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([0, 0, 1])),
(rotation_matrix(75*u.deg), r.CartesianRepresentation([0, 0, 1]))),
((rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 0, 0])),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([0, 0, 1])),
(rotation_matrix(75*u.deg), r.CartesianRepresentation([1/np.sqrt(2), -1/np.sqrt(2), 1]))),
((rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 2 ,3])),
(None, r.CartesianRepresentation([4, 5, 6])),
(rotation_matrix(30*u.deg), r.CartesianRepresentation([5, 7, 9]))),
((None, r.CartesianRepresentation([1, 2, 3])),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([4, 5, 6])),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([3/np.sqrt(2)+4, 1/np.sqrt(2)+5, 9]))),
((None, r.CartesianRepresentation([1, 2, 3])),
(None, r.CartesianRepresentation([4, 5, 6])),
(None, r.CartesianRepresentation([5, 7, 9]))),
((rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 0, 0])),
(None, None),
(rotation_matrix(30*u.deg), r.CartesianRepresentation([1, 0, 0]))),
((None, None),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([0, 0, 1])),
(rotation_matrix(45*u.deg), r.CartesianRepresentation([0, 0, 1]))),
((None, None),
(None, None),
(None, None))])
# fmt: on
def test_combine_affine_params(first, second, check):
result = t._combine_affine_params(first, second)
if check[0] is None:
assert result[0] is None
else:
assert_allclose(result[0], check[0])
if check[1] is None:
assert result[1] is None
else:
assert_allclose(result[1].xyz, check[1].xyz)
|
944cc534cea56a01e287ec699e385f803dee3f0308f743c5e70c2f2939945e90 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for ICRS transformations, primarily to/from AltAz.
"""
import numpy as np
from astropy import units as u
from astropy.coordinates import (
CIRS,
ICRS,
AltAz,
EarthLocation,
HADec,
SkyCoord,
frame_transform_graph,
)
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
def test_icrs_altaz_consistency():
"""
    Check ICRS<->AltAz for consistency with ICRS<->CIRS<->AltAz.
    The latter is extensively tested in test_intermediate_transformations.py.
"""
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.km * 1e5
icoo = SkyCoord(ra=usph.lon, dec=usph.lat, distance=dist)
observer = EarthLocation(28 * u.deg, 23 * u.deg, height=2000.0 * u.km)
obstime = Time("J2010")
aa_frame = AltAz(obstime=obstime, location=observer)
# check we are going direct!
trans = frame_transform_graph.get_transform(ICRS, AltAz).transforms
assert len(trans) == 1
# check that ICRS-AltAz and ICRS->CIRS->AltAz are consistent
aa1 = icoo.transform_to(aa_frame)
aa2 = icoo.transform_to(CIRS()).transform_to(aa_frame)
assert_allclose(aa1.separation_3d(aa2), 0 * u.mm, atol=1 * u.mm)
# check roundtrip
roundtrip = icoo.transform_to(aa_frame).transform_to(icoo)
assert_allclose(roundtrip.separation_3d(icoo), 0 * u.mm, atol=1 * u.mm)
# check there and back via CIRS mish-mash
roundtrip = icoo.transform_to(aa_frame).transform_to(CIRS()).transform_to(icoo)
assert_allclose(roundtrip.separation_3d(icoo), 0 * u.mm, atol=1 * u.mm)
def test_icrs_hadec_consistency():
"""
    Check ICRS<->HADec for consistency with ICRS<->CIRS<->HADec.
"""
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.km * 1e5
icoo = SkyCoord(ra=usph.lon, dec=usph.lat, distance=dist)
observer = EarthLocation(28 * u.deg, 23 * u.deg, height=2000.0 * u.km)
obstime = Time("J2010")
hd_frame = HADec(obstime=obstime, location=observer)
# check we are going direct!
trans = frame_transform_graph.get_transform(ICRS, HADec).transforms
assert len(trans) == 1
# check that ICRS-HADec and ICRS->CIRS->HADec are consistent
aa1 = icoo.transform_to(hd_frame)
aa2 = icoo.transform_to(CIRS()).transform_to(hd_frame)
assert_allclose(aa1.separation_3d(aa2), 0 * u.mm, atol=1 * u.mm)
# check roundtrip
roundtrip = icoo.transform_to(hd_frame).transform_to(icoo)
assert_allclose(roundtrip.separation_3d(icoo), 0 * u.mm, atol=1 * u.mm)
# check there and back via CIRS mish-mash
roundtrip = icoo.transform_to(hd_frame).transform_to(CIRS()).transform_to(icoo)
assert_allclose(roundtrip.separation_3d(icoo), 0 * u.mm, atol=1 * u.mm)
|
8043fa2683c996653a2e01279c329ac91edf04d86158e2000da30c33cbac1d0a | import pytest
from astropy.coordinates.builtin_frames.utils import (
get_offset_sun_from_barycenter,
get_polar_motion,
)
from astropy.coordinates.solar_system import get_body_barycentric_posvel
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.utils.exceptions import AstropyWarning
def test_polar_motion_unsupported_dates():
msg = r"Tried to get polar motions for times {} IERS.*"
with pytest.warns(AstropyWarning, match=msg.format("before")):
get_polar_motion(Time("1900-01-01"))
with pytest.warns(AstropyWarning, match=msg.format("after")):
get_polar_motion(Time("2100-01-01"))
def test_sun_from_barycenter_offset():
time = Time("2020-01-01")
pos, vel = get_body_barycentric_posvel("sun", time)
offset = get_offset_sun_from_barycenter(time)
assert_quantity_allclose(offset.xyz, pos.xyz)
assert not bool(offset.differentials)
offset_with_vel = get_offset_sun_from_barycenter(time, include_velocity=True)
assert_quantity_allclose(offset_with_vel.xyz, pos.xyz)
assert_quantity_allclose(offset_with_vel.differentials["s"].d_xyz, vel.xyz)
reverse = get_offset_sun_from_barycenter(time, reverse=True)
assert_quantity_allclose(reverse.xyz, -pos.xyz)
assert not bool(reverse.differentials)
reverse_with_vel = get_offset_sun_from_barycenter(
time, reverse=True, include_velocity=True
)
assert_quantity_allclose(reverse_with_vel.xyz, -pos.xyz)
assert_quantity_allclose(reverse_with_vel.differentials["s"].d_xyz, -vel.xyz)
|
faaf4c8273b39c2e47379cbe42fbc1c0429a4c127689d22fe915cbeaf7601dd0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test geodetic representations"""
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.coordinates.earth import (
GRS80GeodeticRepresentation,
WGS72GeodeticRepresentation,
WGS84GeodeticRepresentation,
)
from astropy.coordinates.representation import CartesianRepresentation
from astropy.units import allclose as quantity_allclose
def test_cartesian_wgs84geodetic_roundtrip():
# Test array-valued input in the process.
s1 = CartesianRepresentation(
x=[1, 3000.0] * u.km, y=[7000.0, 4.0] * u.km, z=[5.0, 6000.0] * u.km
)
s2 = WGS84GeodeticRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = WGS84GeodeticRepresentation.from_representation(s3)
assert quantity_allclose(s1.x, s3.x)
assert quantity_allclose(s1.y, s3.y)
assert quantity_allclose(s1.z, s3.z)
assert quantity_allclose(s2.lon, s4.lon)
assert quantity_allclose(s2.lat, s4.lat)
assert quantity_allclose(s2.height, s4.height)
# Test initializer just for the sake of it.
s5 = WGS84GeodeticRepresentation(s2.lon, s2.lat, s2.height)
assert_array_equal(s2.lon, s5.lon)
assert_array_equal(s2.lat, s5.lat)
assert_array_equal(s2.height, s5.height)
def vvd(val, valok, dval, func, test, status):
"""Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
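# Example: ``vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e1", status)``
# checks ``e`` against the ERFA reference value to within 1e-14; the last
# three arguments exist only to mirror the C routine's signature.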
def test_geocentric_to_geodetic():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
# Here, test the chain. Direct conversion from Cartesian to
# various Geodetic representations is done indirectly in test_earth.
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
gc = CartesianRepresentation(x, y, z, u.m)
gd = WGS84GeodeticRepresentation.from_cartesian(gc)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.9827937232473290680, 1e-14, "eraGc2gd", "e1", status)
vvd(p, 0.97160184819075459, 1e-14, "eraGc2gd", "p1", status)
vvd(h, 331.4172461426059892, 1e-8, "eraGc2gd", "h1", status)
gd = gd.represent_as(GRS80GeodeticRepresentation)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
gd = gd.represent_as(WGS72GeodeticRepresentation)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_geodetic_to_geocentric():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
# These tests are also done implicitly in test_earth.py.
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
gd = WGS84GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
gd = GRS80GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
gd = WGS72GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
def test_default_height_is_zero():
gd = WGS84GeodeticRepresentation(10 * u.deg, 20 * u.deg)
assert gd.lon == 10 * u.deg
assert gd.lat == 20 * u.deg
assert gd.height == 0 * u.m
def test_non_angle_error():
with pytest.raises(u.UnitTypeError):
WGS84GeodeticRepresentation(20 * u.m, 20 * u.deg, 20 * u.m)
def test_non_length_error():
with pytest.raises(u.UnitTypeError, match="units of length"):
WGS84GeodeticRepresentation(10 * u.deg, 20 * u.deg, 30)
|
7c6117145c062023c5fdb7971fb9f82ad2a2f65446686613509a2b6c88cf1d6a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
Latitude,
Longitude,
SphericalDifferential,
SphericalRepresentation,
)
from astropy.units.quantity_helper.function_helpers import ARRAY_FUNCTION_ENABLED
from .test_representation import representation_equal
@pytest.fixture(params=[True, False] if ARRAY_FUNCTION_ENABLED else [True])
def method(request):
return request.param
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
class ShapeSetup:
"""Manipulation of Representation shapes.
Checking that attributes are manipulated correctly.
Even more exhaustive tests are done in time.tests.test_methods
"""
def setup_class(cls):
# We set up some representations with, on purpose, copy=False,
# so we can check that broadcasting is handled correctly.
lon = Longitude(np.arange(0, 24, 4), u.hourangle)
lat = Latitude(np.arange(-90, 91, 30), u.deg)
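        # 6 longitudes x 7 latitudes, so the representations below have
        # shape (6, 7).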
# With same-sized arrays
cls.s0 = SphericalRepresentation(
lon[:, np.newaxis] * np.ones(lat.shape),
lat * np.ones(lon.shape)[:, np.newaxis],
np.ones(lon.shape + lat.shape) * u.kpc,
copy=False,
)
cls.diff = SphericalDifferential(
d_lon=np.ones(cls.s0.shape) * u.mas / u.yr,
d_lat=np.ones(cls.s0.shape) * u.mas / u.yr,
d_distance=np.ones(cls.s0.shape) * u.km / u.s,
copy=False,
)
cls.s0 = cls.s0.with_differentials(cls.diff)
# With unequal arrays -> these will be broadcasted.
cls.s1 = SphericalRepresentation(
lon[:, np.newaxis], lat, 1.0 * u.kpc, differentials=cls.diff, copy=False
)
# For completeness on some tests, also a cartesian one
cls.c0 = cls.s0.to_cartesian()
class TestManipulation(ShapeSetup):
"""Manipulation of Representation shapes.
Checking that attributes are manipulated correctly.
Even more exhaustive tests are done in time.tests.test_methods
"""
def test_ravel(self, method):
if method:
s0_ravel = self.s0.ravel()
else:
s0_ravel = np.ravel(self.s0)
assert type(s0_ravel) is type(self.s0)
assert s0_ravel.shape == (self.s0.size,)
assert np.all(s0_ravel.lon == self.s0.lon.ravel())
assert np.may_share_memory(s0_ravel.lon, self.s0.lon)
assert np.may_share_memory(s0_ravel.lat, self.s0.lat)
assert np.may_share_memory(s0_ravel.distance, self.s0.distance)
assert s0_ravel.differentials["s"].shape == (self.s0.size,)
# Since s1 was broadcast, ravel needs to make a copy.
if method:
s1_ravel = self.s1.ravel()
else:
s1_ravel = np.ravel(self.s1)
assert type(s1_ravel) is type(self.s1)
assert s1_ravel.shape == (self.s1.size,)
assert s1_ravel.differentials["s"].shape == (self.s1.size,)
assert np.all(s1_ravel.lon == self.s1.lon.ravel())
assert not np.may_share_memory(s1_ravel.lat, self.s1.lat)
def test_copy(self, method):
if method:
s0_copy = self.s0.copy()
else:
s0_copy = np.copy(self.s0)
s0_copy_diff = s0_copy.differentials["s"]
assert s0_copy.shape == self.s0.shape
assert np.all(s0_copy.lon == self.s0.lon)
assert np.all(s0_copy.lat == self.s0.lat)
# Check copy was made of internal data.
assert not np.may_share_memory(s0_copy.distance, self.s0.distance)
assert not np.may_share_memory(s0_copy_diff.d_lon, self.diff.d_lon)
def test_flatten(self):
s0_flatten = self.s0.flatten()
s0_diff = s0_flatten.differentials["s"]
assert s0_flatten.shape == (self.s0.size,)
assert s0_diff.shape == (self.s0.size,)
assert np.all(s0_flatten.lon == self.s0.lon.flatten())
assert np.all(s0_diff.d_lon == self.diff.d_lon.flatten())
# Flatten always copies.
assert not np.may_share_memory(s0_flatten.distance, self.s0.distance)
assert not np.may_share_memory(s0_diff.d_lon, self.diff.d_lon)
s1_flatten = self.s1.flatten()
assert s1_flatten.shape == (self.s1.size,)
assert np.all(s1_flatten.lon == self.s1.lon.flatten())
assert not np.may_share_memory(s1_flatten.lat, self.s1.lat)
def test_transpose(self):
s0_transpose = self.s0.transpose()
s0_diff = s0_transpose.differentials["s"]
assert s0_transpose.shape == (7, 6)
assert s0_diff.shape == s0_transpose.shape
assert np.all(s0_transpose.lon == self.s0.lon.transpose())
assert np.all(s0_diff.d_lon == self.diff.d_lon.transpose())
assert np.may_share_memory(s0_transpose.distance, self.s0.distance)
assert np.may_share_memory(s0_diff.d_lon, self.diff.d_lon)
s1_transpose = self.s1.transpose()
s1_diff = s1_transpose.differentials["s"]
assert s1_transpose.shape == (7, 6)
assert s1_diff.shape == s1_transpose.shape
assert np.all(s1_transpose.lat == self.s1.lat.transpose())
assert np.all(s1_diff.d_lon == self.diff.d_lon.transpose())
assert np.may_share_memory(s1_transpose.lat, self.s1.lat)
assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon)
# Only one check on T, since it just calls transpose anyway.
# Doing it on the CartesianRepresentation just for variety's sake.
c0_T = self.c0.T
assert c0_T.shape == (7, 6)
assert np.all(c0_T.x == self.c0.x.T)
assert np.may_share_memory(c0_T.y, self.c0.y)
def test_diagonal(self):
s0_diagonal = self.s0.diagonal()
s0_diff = s0_diagonal.differentials["s"]
assert s0_diagonal.shape == (6,)
assert s0_diff.shape == s0_diagonal.shape
assert np.all(s0_diagonal.lat == self.s0.lat.diagonal())
assert np.all(s0_diff.d_lon == self.diff.d_lon.diagonal())
assert np.may_share_memory(s0_diagonal.lat, self.s0.lat)
assert np.may_share_memory(s0_diff.d_lon, self.diff.d_lon)
def test_swapaxes(self, method):
if method:
s1_swapaxes = self.s1.swapaxes(0, 1)
else:
s1_swapaxes = np.swapaxes(self.s1, 0, 1)
s1_diff = s1_swapaxes.differentials["s"]
assert s1_swapaxes.shape == (7, 6)
assert s1_diff.shape == s1_swapaxes.shape
assert np.all(s1_swapaxes.lat == self.s1.lat.swapaxes(0, 1))
assert np.all(s1_diff.d_lon == self.diff.d_lon.swapaxes(0, 1))
assert np.may_share_memory(s1_swapaxes.lat, self.s1.lat)
assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon)
def test_reshape(self):
s0_reshape = self.s0.reshape(2, 3, 7)
s0_diff = s0_reshape.differentials["s"]
assert s0_reshape.shape == (2, 3, 7)
assert s0_diff.shape == s0_reshape.shape
assert np.all(s0_reshape.lon == self.s0.lon.reshape(2, 3, 7))
assert np.all(s0_reshape.lat == self.s0.lat.reshape(2, 3, 7))
assert np.all(s0_reshape.distance == self.s0.distance.reshape(2, 3, 7))
assert np.may_share_memory(s0_reshape.lon, self.s0.lon)
assert np.may_share_memory(s0_reshape.lat, self.s0.lat)
assert np.may_share_memory(s0_reshape.distance, self.s0.distance)
s1_reshape = self.s1.reshape(3, 2, 7)
s1_diff = s1_reshape.differentials["s"]
assert s1_reshape.shape == (3, 2, 7)
assert s1_diff.shape == s1_reshape.shape
assert np.all(s1_reshape.lat == self.s1.lat.reshape(3, 2, 7))
assert np.all(s1_diff.d_lon == self.diff.d_lon.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.lat, self.s1.lat)
assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon)
        # For reshape(3, 14), copying is necessary for lon, lat, but not for distance
s1_reshape2 = self.s1.reshape(3, 14)
assert s1_reshape2.shape == (3, 14)
assert np.all(s1_reshape2.lon == self.s1.lon.reshape(3, 14))
assert not np.may_share_memory(s1_reshape2.lon, self.s1.lon)
assert s1_reshape2.distance.shape == (3, 14)
assert np.may_share_memory(s1_reshape2.distance, self.s1.distance)
def test_squeeze(self):
s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze()
s0_diff = s0_squeeze.differentials["s"]
assert s0_squeeze.shape == (3, 2, 7)
assert s0_diff.shape == s0_squeeze.shape
assert np.all(s0_squeeze.lat == self.s0.lat.reshape(3, 2, 7))
assert np.all(s0_diff.d_lon == self.diff.d_lon.reshape(3, 2, 7))
assert np.may_share_memory(s0_squeeze.lat, self.s0.lat)
def test_add_dimension(self):
s0_adddim = self.s0[:, np.newaxis, :]
s0_diff = s0_adddim.differentials["s"]
assert s0_adddim.shape == (6, 1, 7)
assert s0_diff.shape == s0_adddim.shape
assert np.all(s0_adddim.lon == self.s0.lon[:, np.newaxis, :])
assert np.all(s0_diff.d_lon == self.diff.d_lon[:, np.newaxis, :])
assert np.may_share_memory(s0_adddim.lat, self.s0.lat)
def test_take(self, method):
if method:
s0_take = self.s0.take((5, 2))
else:
s0_take = np.take(self.s0, (5, 2))
s0_diff = s0_take.differentials["s"]
assert s0_take.shape == (2,)
assert s0_diff.shape == s0_take.shape
assert np.all(s0_take.lon == self.s0.lon.take((5, 2)))
assert np.all(s0_diff.d_lon == self.diff.d_lon.take((5, 2)))
def test_broadcast_to_via_apply(self):
s0_broadcast = self.s0._apply(np.broadcast_to, (3, 6, 7), subok=True)
s0_diff = s0_broadcast.differentials["s"]
assert type(s0_broadcast) is type(self.s0)
assert s0_broadcast.shape == (3, 6, 7)
assert s0_diff.shape == s0_broadcast.shape
assert np.all(s0_broadcast.lon == self.s0.lon)
assert np.all(s0_broadcast.lat == self.s0.lat)
assert np.all(s0_broadcast.distance == self.s0.distance)
assert np.may_share_memory(s0_broadcast.lon, self.s0.lon)
assert np.may_share_memory(s0_broadcast.lat, self.s0.lat)
assert np.may_share_memory(s0_broadcast.distance, self.s0.distance)
class TestSetShape(ShapeSetup):
def test_shape_setting(self):
# Shape-setting should be on the object itself, since copying removes
# zero-strides due to broadcasting. Hence, this should be the only
# test in this class.
self.s0.shape = (2, 3, 7)
assert self.s0.shape == (2, 3, 7)
assert self.s0.lon.shape == (2, 3, 7)
assert self.s0.lat.shape == (2, 3, 7)
assert self.s0.distance.shape == (2, 3, 7)
assert self.diff.shape == (2, 3, 7)
assert self.diff.d_lon.shape == (2, 3, 7)
assert self.diff.d_lat.shape == (2, 3, 7)
assert self.diff.d_distance.shape == (2, 3, 7)
# this works with the broadcasting.
self.s1.shape = (2, 3, 7)
assert self.s1.shape == (2, 3, 7)
assert self.s1.lon.shape == (2, 3, 7)
assert self.s1.lat.shape == (2, 3, 7)
assert self.s1.distance.shape == (2, 3, 7)
assert self.s1.distance.strides == (0, 0, 0)
# but this one does not.
oldshape = self.s1.shape
with pytest.raises(ValueError):
self.s1.shape = (1,)
with pytest.raises(AttributeError):
self.s1.shape = (42,)
assert self.s1.shape == oldshape
assert self.s1.lon.shape == oldshape
assert self.s1.lat.shape == oldshape
assert self.s1.distance.shape == oldshape
# Finally, a more complicated one that checks that things get reset
# properly if it is not the first component that fails.
s2 = SphericalRepresentation(
self.s1.lon.copy(), self.s1.lat, self.s1.distance, copy=False
)
assert 0 not in s2.lon.strides
assert 0 in s2.lat.strides
with pytest.raises(AttributeError):
s2.shape = (42,)
assert s2.shape == oldshape
assert s2.lon.shape == oldshape
assert s2.lat.shape == oldshape
assert s2.distance.shape == oldshape
assert 0 not in s2.lon.strides
assert 0 in s2.lat.strides
class TestShapeFunctions(ShapeSetup):
@needs_array_function
def test_broadcast_to(self):
s0_broadcast = np.broadcast_to(self.s0, (3, 6, 7))
s0_diff = s0_broadcast.differentials["s"]
assert type(s0_broadcast) is type(self.s0)
assert s0_broadcast.shape == (3, 6, 7)
assert s0_diff.shape == s0_broadcast.shape
assert np.all(s0_broadcast.lon == self.s0.lon)
assert np.all(s0_broadcast.lat == self.s0.lat)
assert np.all(s0_broadcast.distance == self.s0.distance)
assert np.may_share_memory(s0_broadcast.lon, self.s0.lon)
assert np.may_share_memory(s0_broadcast.lat, self.s0.lat)
assert np.may_share_memory(s0_broadcast.distance, self.s0.distance)
s1_broadcast = np.broadcast_to(self.s1, shape=(3, 6, 7))
s1_diff = s1_broadcast.differentials["s"]
assert s1_broadcast.shape == (3, 6, 7)
assert s1_diff.shape == s1_broadcast.shape
assert np.all(s1_broadcast.lat == self.s1.lat)
assert np.all(s1_broadcast.lon == self.s1.lon)
assert np.all(s1_broadcast.distance == self.s1.distance)
assert s1_broadcast.distance.shape == (3, 6, 7)
assert np.may_share_memory(s1_broadcast.lat, self.s1.lat)
assert np.may_share_memory(s1_broadcast.lon, self.s1.lon)
assert np.may_share_memory(s1_broadcast.distance, self.s1.distance)
# A final test that "may_share_memory" equals "does_share_memory"
# Do this on a copy, to keep self.s0 unchanged.
sc = self.s0.copy()
assert not np.may_share_memory(sc.lon, self.s0.lon)
assert not np.may_share_memory(sc.lat, self.s0.lat)
sc_broadcast = np.broadcast_to(sc, (3, 6, 7))
assert np.may_share_memory(sc_broadcast.lon, sc.lon)
# Can only write to copy, not to broadcast version.
sc.lon[0, 0] = 22.0 * u.hourangle
assert np.all(sc_broadcast.lon[:, 0, 0] == 22.0 * u.hourangle)
@needs_array_function
def test_atleast_1d(self):
s00 = self.s0.ravel()[0]
assert s00.ndim == 0
s00_1d = np.atleast_1d(s00)
assert s00_1d.ndim == 1
assert np.all(representation_equal(s00[np.newaxis], s00_1d))
assert np.may_share_memory(s00_1d.lon, s00.lon)
@needs_array_function
def test_atleast_2d(self):
s0r = self.s0.ravel()
assert s0r.ndim == 1
s0r_2d = np.atleast_2d(s0r)
assert s0r_2d.ndim == 2
assert np.all(representation_equal(s0r[np.newaxis], s0r_2d))
assert np.may_share_memory(s0r_2d.lon, s0r.lon)
@needs_array_function
def test_atleast_3d(self):
assert self.s0.ndim == 2
s0_3d, s1_3d = np.atleast_3d(self.s0, self.s1)
assert s0_3d.ndim == s1_3d.ndim == 3
assert np.all(representation_equal(self.s0[:, :, np.newaxis], s0_3d))
assert np.all(representation_equal(self.s1[:, :, np.newaxis], s1_3d))
assert np.may_share_memory(s0_3d.lon, self.s0.lon)
def test_move_axis(self):
# Goes via transpose so works without __array_function__ as well.
s0_10 = np.moveaxis(self.s0, 0, 1)
assert s0_10.shape == (self.s0.shape[1], self.s0.shape[0])
assert np.all(representation_equal(self.s0.T, s0_10))
assert np.may_share_memory(s0_10.lon, self.s0.lon)
def test_roll_axis(self):
# Goes via transpose so works without __array_function__ as well.
s0_10 = np.rollaxis(self.s0, 1)
assert s0_10.shape == (self.s0.shape[1], self.s0.shape[0])
assert np.all(representation_equal(self.s0.T, s0_10))
assert np.may_share_memory(s0_10.lon, self.s0.lon)
@needs_array_function
def test_fliplr(self):
s0_lr = np.fliplr(self.s0)
assert np.all(representation_equal(self.s0[:, ::-1], s0_lr))
assert np.may_share_memory(s0_lr.lon, self.s0.lon)
@needs_array_function
def test_rot90(self):
s0_270 = np.rot90(self.s0, 3)
assert np.all(representation_equal(self.s0.T[:, ::-1], s0_270))
assert np.may_share_memory(s0_270.lon, self.s0.lon)
@needs_array_function
def test_roll(self):
s0r = np.roll(self.s0, 1, axis=0)
assert np.all(representation_equal(s0r[1:], self.s0[:-1]))
assert np.all(representation_equal(s0r[0], self.s0[-1]))
@needs_array_function
def test_delete(self):
s0d = np.delete(self.s0, [2, 3], axis=0)
assert np.all(representation_equal(s0d[:2], self.s0[:2]))
assert np.all(representation_equal(s0d[2:], self.s0[4:]))
@pytest.mark.parametrize("attribute", ["shape", "ndim", "size"])
def test_shape_attribute_functions(self, attribute):
function = getattr(np, attribute)
result = function(self.s0)
assert result == getattr(self.s0, attribute)
|
a6bd4081862b6c865de93e992b869f444ed8cc5035618170f0827cd54203a127 | import numpy as np
import pytest
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.coordinates import Angle, Distance, EarthLocation, SkyCoord
from astropy.coordinates.sites import get_builtin_sites
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.utils.data import get_pkg_data_filename
@pytest.mark.parametrize("kind", ["heliocentric", "barycentric"])
def test_basic(kind):
t0 = Time("2015-1-1")
loc = get_builtin_sites()["example_site"]
sc = SkyCoord(0, 0, unit=u.deg, obstime=t0, location=loc)
rvc0 = sc.radial_velocity_correction(kind)
assert rvc0.shape == ()
assert rvc0.unit.is_equivalent(u.km / u.s)
scs = SkyCoord(0, 0, unit=u.deg, obstime=t0 + np.arange(10) * u.day, location=loc)
rvcs = scs.radial_velocity_correction(kind)
assert rvcs.shape == (10,)
assert rvcs.unit.is_equivalent(u.km / u.s)
test_input_time = Time(2457244.5, format="jd")
# test_input_loc = EarthLocation.of_site('Cerro Paranal')
# to avoid the network hit we just copy here what that yields
test_input_loc = EarthLocation.from_geodetic(
lon=-70.403 * u.deg, lat=-24.6252 * u.deg, height=2635 * u.m
)
def test_helio_iraf():
"""
Compare the heliocentric correction to the IRAF rvcorrect.
    The `generate_IRAF_input` function is provided to show how the comparison
    data was produced.
"""
# this is based on running IRAF with the output of `generate_IRAF_input` below
rvcorr_result = """
# RVCORRECT: Observatory parameters for European Southern Observatory: Paranal
# latitude = -24:37.5
# longitude = 70:24.2
# altitude = 2635
## HJD VOBS VHELIO VLSR VDIURNAL VLUNAR VANNUAL VSOLAR
2457244.50120 0.00 -10.36 -20.35 -0.034 -0.001 -10.325 -9.993
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.50278 0.00 -2.29 -11.75 0.115 0.004 -2.413 -9.459
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.50317 0.00 -17.19 -17.44 0.078 0.001 -17.269 -0.253
2457244.50348 0.00 2.35 -6.21 0.192 0.006 2.156 -8.560
2457244.49959 0.00 2.13 -15.06 -0.078 -0.000 2.211 -17.194
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.50186 0.00 -24.47 -22.16 -0.038 -0.004 -24.433 2.313
2457244.50470 0.00 -11.11 -8.57 0.221 0.005 -11.332 2.534
2457244.50402 0.00 6.90 -0.38 0.259 0.008 6.629 -7.277
2457244.50051 0.00 11.53 -5.78 0.038 0.004 11.489 -17.311
2457244.49768 0.00 -1.84 -19.37 -0.221 -0.004 -1.612 -17.533
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.50109 0.00 -27.69 -22.90 -0.096 -0.006 -27.584 4.785
2457244.50457 0.00 -17.00 -9.30 0.196 0.003 -17.201 7.704
2457244.50532 0.00 2.62 2.97 0.340 0.009 2.276 0.349
2457244.50277 0.00 16.42 4.67 0.228 0.009 16.178 -11.741
2457244.49884 0.00 13.98 -5.48 -0.056 0.002 14.039 -19.463
2457244.49649 0.00 -2.84 -19.84 -0.297 -0.007 -2.533 -17.000
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.50025 0.00 -29.30 -22.47 -0.149 -0.008 -29.146 6.831
2457244.50398 0.00 -21.55 -9.88 0.146 0.001 -21.700 11.670
2457244.50577 0.00 -3.26 4.00 0.356 0.009 -3.623 7.263
2457244.50456 0.00 14.87 11.06 0.357 0.011 14.497 -3.808
2457244.50106 0.00 22.20 7.14 0.149 0.008 22.045 -15.058
2457244.49732 0.00 14.45 -5.44 -0.146 -0.001 14.600 -19.897
2457244.49554 0.00 -3.84 -19.33 -0.356 -0.008 -3.478 -15.491
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49942 0.00 -29.36 -20.83 -0.193 -0.009 -29.157 8.527
2457244.50312 0.00 -24.26 -9.75 0.088 -0.001 -24.348 14.511
2457244.50552 0.00 -8.66 4.06 0.327 0.007 -8.996 12.721
2457244.50549 0.00 10.14 14.13 0.413 0.012 9.715 3.994
2457244.50305 0.00 23.35 15.76 0.306 0.011 23.031 -7.586
2457244.49933 0.00 24.78 8.18 0.056 0.006 24.721 -16.601
2457244.49609 0.00 13.77 -5.06 -0.221 -0.003 13.994 -18.832
2457244.49483 0.00 -4.53 -17.77 -0.394 -0.010 -4.131 -13.237
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49907 0.00 -28.17 -17.30 -0.197 -0.009 -27.966 10.874
2457244.50285 0.00 -22.96 -5.96 0.090 -0.001 -23.048 16.995
2457244.50531 0.00 -7.00 8.16 0.335 0.007 -7.345 15.164
2457244.50528 0.00 12.23 18.47 0.423 0.012 11.795 6.238
2457244.50278 0.00 25.74 20.13 0.313 0.012 25.416 -5.607
2457244.49898 0.00 27.21 12.38 0.057 0.006 27.144 -14.829
2457244.49566 0.00 15.94 -1.17 -0.226 -0.003 16.172 -17.111
2457244.49437 0.00 -2.78 -14.17 -0.403 -0.010 -2.368 -11.387
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49875 0.00 -25.73 -12.99 -0.193 -0.009 -25.525 12.734
2457244.50246 0.00 -20.63 -1.91 0.088 -0.001 -20.716 18.719
2457244.50485 0.00 -5.03 11.90 0.327 0.007 -5.365 16.928
2457244.50482 0.00 13.77 21.97 0.413 0.012 13.347 8.202
2457244.50238 0.00 26.98 23.60 0.306 0.011 26.663 -3.378
2457244.49867 0.00 28.41 16.02 0.056 0.005 28.353 -12.393
2457244.49542 0.00 17.40 2.78 -0.221 -0.003 17.625 -14.625
2457244.49416 0.00 -0.90 -9.93 -0.394 -0.010 -0.499 -9.029
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49894 0.00 -22.20 -7.14 -0.149 -0.008 -22.045 15.058
2457244.50268 0.00 -14.45 5.44 0.146 0.001 -14.600 19.897
2457244.50446 0.00 3.84 19.33 0.356 0.008 3.478 15.491
2457244.50325 0.00 21.97 26.39 0.357 0.011 21.598 4.419
2457244.49975 0.00 29.30 22.47 0.149 0.008 29.146 -6.831
2457244.49602 0.00 21.55 9.88 -0.146 -0.001 21.700 -11.670
2457244.49423 0.00 3.26 -4.00 -0.356 -0.009 3.623 -7.263
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49921 0.00 -17.43 -0.77 -0.096 -0.006 -17.333 16.664
2457244.50269 0.00 -6.75 12.83 0.196 0.003 -6.949 19.583
2457244.50344 0.00 12.88 25.10 0.340 0.009 12.527 12.227
2457244.50089 0.00 26.67 26.80 0.228 0.009 26.430 0.137
2457244.49696 0.00 24.24 16.65 -0.056 0.002 24.290 -7.584
2457244.49461 0.00 7.42 2.29 -0.297 -0.007 7.719 -5.122
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49949 0.00 -11.53 5.78 -0.038 -0.004 -11.489 17.311
2457244.50232 0.00 1.84 19.37 0.221 0.004 1.612 17.533
2457244.50165 0.00 19.84 27.56 0.259 0.008 19.573 7.721
2457244.49814 0.00 24.47 22.16 0.038 0.004 24.433 -2.313
2457244.49530 0.00 11.11 8.57 -0.221 -0.005 11.332 -2.534
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.50041 0.00 -2.13 15.06 0.078 0.000 -2.211 17.194
2457244.50071 0.00 17.41 26.30 0.192 0.006 17.214 8.888
2457244.49683 0.00 17.19 17.44 -0.078 -0.001 17.269 0.253
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49975 0.00 14.20 23.86 0.115 0.004 14.085 9.656
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49805 0.00 6.84 16.77 -0.034 -0.001 6.874 9.935
"""
vhs_iraf = []
for line in rvcorr_result.strip().split("\n"):
if not line.strip().startswith("#"):
vhs_iraf.append(float(line.split()[2]))
vhs_iraf = vhs_iraf * u.km / u.s
targets = SkyCoord(
_get_test_input_radecs(), obstime=test_input_time, location=test_input_loc
)
vhs_astropy = targets.radial_velocity_correction("heliocentric")
assert_quantity_allclose(vhs_astropy, vhs_iraf, atol=150 * u.m / u.s)
def generate_IRAF_input(writefn=None):
dt = test_input_time.utc.datetime
coos = _get_test_input_radecs()
lines = []
for ra, dec in zip(coos.ra, coos.dec):
rastr = Angle(ra).to_string(u.hour, sep=":")
decstr = Angle(dec).to_string(u.deg, sep=":")
lines.append(
f"{dt.year} {dt.month} {dt.day} {dt.hour}:{dt.minute} {rastr} {decstr}"
)
if writefn:
with open(writefn, "w") as f:
for l in lines:
f.write(l)
else:
for l in lines:
print(l)
print("Run IRAF as:\nastutil\nrvcorrect f=<filename> observatory=Paranal")
def _get_test_input_radecs():
ras = []
decs = []
for dec in np.linspace(-85, 85, 15):
nra = int(np.round(10 * np.cos(dec * u.deg)).value)
ras1 = np.linspace(-180, 180 - 1e-6, nra)
ras.extend(ras1)
decs.extend([dec] * len(ras1))
return SkyCoord(ra=ras, dec=decs, unit=u.deg)
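# The grid above uses 15 declination rings from -85 to +85 deg, with the
# number of RA samples per ring scaled by cos(dec), giving roughly uniform
# sky coverage.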
def test_barycorr():
# this is the result of calling _get_barycorr_bvcs
# fmt: off
barycorr_bvcs = u.Quantity([
-10335.93326096, -14198.47605491, -2237.60012494, -14198.47595363,
-17425.46512587, -17131.70901174, 2424.37095076, 2130.61519166,
-17425.46495779, -19872.50026998, -24442.37091097, -11017.08975893,
6978.0622355, 11547.93333743, -1877.34772637, -19872.50004258,
-21430.08240017, -27669.14280689, -16917.08506807, 2729.57222968,
16476.49569232, 13971.97171764, -2898.04250914, -21430.08212368,
-22028.51337105, -29301.92349394, -21481.13036199, -3147.44828909,
14959.50065514, 22232.91155425, 14412.11903105, -3921.56359768,
-22028.51305781, -21641.01479409, -29373.0512649, -24205.90521765,
-8557.34138828, 10250.50350732, 23417.2299926, 24781.98057941,
13706.17339044, -4627.70005932, -21641.01445812, -20284.92627505,
-28193.91696959, -22908.51624166, -6901.82132125, 12336.45758056,
25804.51614607, 27200.50029664, 15871.21385688, -2882.24738355,
-20284.9259314, -18020.92947805, -25752.96564978, -20585.81957567,
-4937.25573801, 13870.58916957, 27037.31568441, 28402.06636994,
17326.25977035, -1007.62209045, -18020.92914212, -14950.33284575,
-22223.74260839, -14402.94943965, 3930.73265119, 22037.68163353,
29311.09265126, 21490.30070307, 3156.62229843, -14950.33253252,
-11210.53846867, -17449.59867676, -6697.54090389, 12949.11642965,
26696.03999586, 24191.5164355, 7321.50355488, -11210.53819218,
-6968.89359681, -11538.76423011, 1886.51695238, 19881.66902396,
24451.54039956, 11026.26000765, -6968.89336945, -2415.20201758,
-2121.44599781, 17434.63406085, 17140.87871753, -2415.2018495,
2246.76923076, 14207.64513054, 2246.76933194, 6808.40787728],
u.m/u.s)
# fmt: on
# this tries the *other* way of calling radial_velocity_correction relative
# to the IRAF tests
targets = _get_test_input_radecs()
bvcs_astropy = targets.radial_velocity_correction(
obstime=test_input_time, location=test_input_loc, kind="barycentric"
)
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10 * u.mm / u.s)
def _get_barycorr_bvcs(coos, loc, injupyter=False):
"""
Gets the barycentric correction of the test data from the
http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
Requires the https://github.com/tronsgaard/barycorr python interface to that
site.
Provided to reproduce the test data above, but not required to actually run
the tests.
"""
import barycorr
from astropy.utils.console import ProgressBar
bvcs = []
for ra, dec in ProgressBar(
list(zip(coos.ra.deg, coos.dec.deg)), ipython_widget=injupyter
):
res = barycorr.bvc(
test_input_time.utc.jd,
ra,
dec,
lat=loc.geodetic[1].deg,
lon=loc.geodetic[0].deg,
elevation=loc.geodetic[2].to(u.m).value,
)
bvcs.append(res)
return bvcs * u.m / u.s
def test_rvcorr_multiple_obstimes_onskycoord():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
arrtime = Time("2005-03-21 00:00:00") + np.linspace(-1, 1, 10) * u.day
sc = SkyCoord(1 * u.deg, 2 * u.deg, 100 * u.kpc, obstime=arrtime, location=loc)
rvcbary_sc2 = sc.radial_velocity_correction(kind="barycentric")
assert len(rvcbary_sc2) == 10
    # check the combination of an array of coordinates with an array of obstimes
sc = SkyCoord(
([1] * 10) * u.deg, 2 * u.deg, 100 * u.kpc, obstime=arrtime, location=loc
)
rvcbary_sc3 = sc.radial_velocity_correction(kind="barycentric")
assert len(rvcbary_sc3) == 10
def test_invalid_argument_combos():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time("2005-03-21 00:00:00")
timel = Time("2005-03-21 00:00:00", location=loc)
scwattrs = SkyCoord(1 * u.deg, 2 * u.deg, obstime=time, location=loc)
scwoattrs = SkyCoord(1 * u.deg, 2 * u.deg)
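    # with obstime and location stored on the SkyCoord, no arguments are needed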
scwattrs.radial_velocity_correction()
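    # ...but supplying them again as explicit arguments conflicts with the attributes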
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(obstime=time, location=loc)
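    # without frame attributes, obstime alone is not enough; a location is also needed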
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction(obstime=time)
scwoattrs.radial_velocity_correction(obstime=time, location=loc)
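    # ...and with neither attributes nor arguments there is nothing to compute from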
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(timel)
def test_regression_9645():
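    # A coordinate with explicitly zero proper motion and radial velocity
    # should yield the same correction as one with no velocity data (gh-9645).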
sc = SkyCoord(
10 * u.deg,
20 * u.deg,
distance=5 * u.pc,
obstime=test_input_time,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
sc_novel = SkyCoord(
10 * u.deg, 20 * u.deg, distance=5 * u.pc, obstime=test_input_time
)
corr = sc.radial_velocity_correction(
obstime=test_input_time, location=test_input_loc
)
corr_novel = sc_novel.radial_velocity_correction(
obstime=test_input_time, location=test_input_loc
)
assert_quantity_allclose(corr, corr_novel)
def test_barycorr_withvels():
# this is the result of calling _get_barycorr_bvcs_withvels
# fmt: off
barycorr_bvcs = u.Quantity(
[-10335.94926581, -14198.49117304, -2237.58656335,
-14198.49078575, -17425.47883864, -17131.72711182,
2424.38466675, 2130.62819093, -17425.47834604,
-19872.51254565, -24442.39064348, -11017.0964353,
6978.07515501, 11547.94831175, -1877.34560543,
-19872.51188308, -21430.0931411, -27669.15919972,
-16917.09482078, 2729.57757823, 16476.5087925,
13971.97955641, -2898.04451551, -21430.09220144,
-22028.52224227, -29301.93613248, -21481.14015151,
-3147.44852058, 14959.50849997, 22232.91906264,
14412.12044201, -3921.56783473, -22028.52088749,
-21641.02117064, -29373.05982792, -24205.91319258,
-8557.34473049, 10250.50560918, 23417.23357219,
24781.98113432, 13706.17025059, -4627.70468688,
-21641.01928189, -20284.92926795, -28193.92117514,
-22908.52127321, -6901.82512637, 12336.45557256,
25804.5137786, 27200.49576347, 15871.20847332,
-2882.25080211, -20284.92696256, -18020.92824383,
-25752.96528309, -20585.82211189, -4937.26088706,
13870.58217495, 27037.30698639, 28402.0571686,
17326.25314311, -1007.62313006, -18020.92552769,
-14950.32653444, -22223.73793506, -14402.95155047,
3930.72325162, 22037.66749783, 29311.07826101,
21490.29193529, 3156.62360741, -14950.32373745,
-11210.52665171, -17449.59068509, -6697.54579192,
12949.09948082, 26696.01956077, 24191.50403015,
7321.50684816, -11210.52389393, -6968.87610888,
-11538.7547047, 1886.50525065, 19881.64366561,
24451.52197666, 11026.26396455, -6968.87351156,
-2415.17899385, -2121.44598968, 17434.60465075,
17140.87204017, -2415.1771038, 2246.79688215,
14207.61339552, 2246.79790276, 6808.43888253], u.m/u.s)
# fmt: on
coos = _get_test_input_radecvels()
bvcs_astropy = coos.radial_velocity_correction(
obstime=test_input_time, location=test_input_loc
)
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10 * u.mm / u.s)
def _get_test_input_radecvels():
coos = _get_test_input_radecs()
ras = coos.ra
decs = coos.dec
pmra = np.linspace(-1000, 1000, coos.size) * u.mas / u.yr
pmdec = np.linspace(0, 1000, coos.size) * u.mas / u.yr
rvs = np.linspace(0, 100, coos.size) * u.km / u.s
distance = np.linspace(10, 100, coos.size) * u.pc
return SkyCoord(
ras,
decs,
pm_ra_cosdec=pmra,
pm_dec=pmdec,
radial_velocity=rvs,
distance=distance,
obstime=test_input_time,
)
def _get_barycorr_bvcs_withvels(coos, loc, injupyter=False):
"""
Gets the barycentric correction of the test data from the
http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
Requires the https://github.com/tronsgaard/barycorr python interface to that
site.
Provided to reproduce the test data above, but not required to actually run
the tests.
"""
import barycorr
from astropy.utils.console import ProgressBar
bvcs = []
for coo in ProgressBar(coos, ipython_widget=injupyter):
res = barycorr.bvc(
test_input_time.utc.jd,
coo.ra.deg,
coo.dec.deg,
lat=loc.geodetic[1].deg,
lon=loc.geodetic[0].deg,
pmra=coo.pm_ra_cosdec.to_value(u.mas / u.yr),
pmdec=coo.pm_dec.to_value(u.mas / u.yr),
parallax=coo.distance.to_value(u.mas, equivalencies=u.parallax()),
rv=coo.radial_velocity.to_value(u.m / u.s),
epoch=test_input_time.utc.jd,
elevation=loc.geodetic[2].to(u.m).value,
)
bvcs.append(res)
return bvcs * u.m / u.s
def test_warning_no_obstime_on_skycoord():
c = SkyCoord(
l=10 * u.degree,
b=45 * u.degree,
pm_l_cosb=34 * u.mas / u.yr,
pm_b=-117 * u.mas / u.yr,
distance=50 * u.pc,
frame="galactic",
)
with pytest.warns(Warning):
c.radial_velocity_correction("barycentric", test_input_time, test_input_loc)
@pytest.mark.remote_data
def test_regression_10094():
"""
Make sure that when we include the proper motion and radial velocity of
a SkyCoord, our velocity corrections remain close to TEMPO2.
    We check that the corrections for tau Ceti agree to within 5 mm/s.
"""
    # Wright & Eastman (2014), Table 2
# Corrections for tau Ceti
wright_table = Table.read(
get_pkg_data_filename("coordinates/wright_eastmann_2014_tau_ceti.fits")
)
reduced_jds = wright_table["JD-2400000"]
tempo2 = wright_table["TEMPO2"]
barycorr = wright_table["BARYCORR"]
    # tau Ceti Hipparcos data
tauCet = SkyCoord(
"01 44 05.1275 -15 56 22.4006",
unit=(u.hour, u.deg),
pm_ra_cosdec=-1721.05 * u.mas / u.yr,
pm_dec=854.16 * u.mas / u.yr,
distance=Distance(parallax=273.96 * u.mas),
radial_velocity=-16.597 * u.km / u.s,
obstime=Time(48348.5625, format="mjd"),
)
    # CTIO location as used in Wright & Eastman (2014)
xyz = u.Quantity([1814985.3, -5213916.8, -3187738.1], u.m)
obs = EarthLocation(*xyz)
times = Time(2400000, reduced_jds, format="jd")
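    # the TEMPO2 and BARYCORR table columns are dimensionless velocities in
    # units of c, so multiply by the speed of light to get physical velocities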
tempo2 = tempo2 * speed_of_light
barycorr = barycorr * speed_of_light
astropy = tauCet.radial_velocity_correction(location=obs, obstime=times)
assert_quantity_allclose(astropy, tempo2, atol=5 * u.mm / u.s)
assert_quantity_allclose(astropy, barycorr, atol=5 * u.mm / u.s)
|
93877727095e16cc57126972bf118233a1c1b081e5f686b308ed8913c84799ef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.coordinates import EarthLocation, Latitude, Longitude, SkyCoord
# test on frame with most complicated frame attributes.
from astropy.coordinates.builtin_frames import GCRS, ICRS, AltAz
from astropy.time import Time
from astropy.units.quantity_helper.function_helpers import ARRAY_FUNCTION_ENABLED
@pytest.fixture(params=[True, False] if ARRAY_FUNCTION_ENABLED else [True])
def method(request):
return request.param
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
class TestManipulation:
"""Manipulation of Frame shapes.
Checking that attributes are manipulated correctly.
Even more exhaustive tests are done in time.tests.test_methods
"""
def setup_class(cls):
# For these tests, we set up frames and coordinates using copy=False,
# so we can check that broadcasting is handled correctly.
lon = Longitude(np.arange(0, 24, 4), u.hourangle)
lat = Latitude(np.arange(-90, 91, 30), u.deg)
# With same-sized arrays, no attributes.
cls.s0 = ICRS(
lon[:, np.newaxis] * np.ones(lat.shape),
lat * np.ones(lon.shape)[:, np.newaxis],
copy=False,
)
# Make an AltAz frame since that has many types of attributes.
# Match one axis with times.
cls.obstime = Time("2012-01-01") + np.arange(len(lon))[:, np.newaxis] * u.s
# And another with location.
cls.location = EarthLocation(20.0 * u.deg, lat, 100 * u.m)
# Ensure we have a quantity scalar.
cls.pressure = 1000 * u.hPa
# As well as an array.
cls.temperature = (
np.random.uniform(0.0, 20.0, size=(lon.size, lat.size)) * u.deg_C
)
cls.s1 = AltAz(
az=lon[:, np.newaxis],
alt=lat,
obstime=cls.obstime,
location=cls.location,
pressure=cls.pressure,
temperature=cls.temperature,
copy=False,
)
# For some tests, also try a GCRS, since that has representation
# attributes. We match the second dimension (via the location)
cls.obsgeoloc, cls.obsgeovel = cls.location.get_gcrs_posvel(cls.obstime[0, 0])
cls.s2 = GCRS(
ra=lon[:, np.newaxis],
dec=lat,
obstime=cls.obstime,
obsgeoloc=cls.obsgeoloc,
obsgeovel=cls.obsgeovel,
copy=False,
)
# For completeness, also some tests on an empty frame.
cls.s3 = GCRS(
obstime=cls.obstime,
obsgeoloc=cls.obsgeoloc,
obsgeovel=cls.obsgeovel,
copy=False,
)
# And make a SkyCoord
cls.sc = SkyCoord(ra=lon[:, np.newaxis], dec=lat, frame=cls.s3, copy=False)
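        # Resulting broadcast shapes, for reference: s0, s1, s2, sc and
        # temperature are (6, 7); obstime is (6, 1); location is (7,);
        # pressure is a scalar.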
def test_getitem0101(self):
        # We take a slice with only one element on purpose: for the general
        # tests it doesn't matter, but it lets us check, for a few cases,
        # that shapes correctly become scalar if we index our size-1 array
        # down to a scalar. See gh-10113.
item = (slice(0, 1), slice(0, 1))
s0_0101 = self.s0[item]
assert s0_0101.shape == (1, 1)
assert_array_equal(s0_0101.data.lon, self.s0.data.lon[item])
assert np.may_share_memory(s0_0101.data.lon, self.s0.data.lon)
assert np.may_share_memory(s0_0101.data.lat, self.s0.data.lat)
s0_0101_00 = s0_0101[0, 0]
assert s0_0101_00.shape == ()
assert s0_0101_00.data.lon.shape == ()
assert_array_equal(s0_0101_00.data.lon, self.s0.data.lon[0, 0])
s1_0101 = self.s1[item]
assert s1_0101.shape == (1, 1)
assert_array_equal(s1_0101.data.lon, self.s1.data.lon[item])
assert np.may_share_memory(s1_0101.data.lat, self.s1.data.lat)
assert np.all(s1_0101.obstime == self.s1.obstime[item])
assert np.may_share_memory(s1_0101.obstime.jd1, self.s1.obstime.jd1)
assert_array_equal(s1_0101.location, self.s1.location[0, 0])
assert np.may_share_memory(s1_0101.location, self.s1.location)
assert_array_equal(s1_0101.temperature, self.s1.temperature[item])
assert np.may_share_memory(s1_0101.temperature, self.s1.temperature)
# scalar should just be transferred.
assert s1_0101.pressure is self.s1.pressure
s1_0101_00 = s1_0101[0, 0]
assert s1_0101_00.shape == ()
assert s1_0101_00.obstime.shape == ()
assert s1_0101_00.obstime == self.s1.obstime[0, 0]
s2_0101 = self.s2[item]
assert s2_0101.shape == (1, 1)
assert np.all(s2_0101.data.lon == self.s2.data.lon[item])
assert np.may_share_memory(s2_0101.data.lat, self.s2.data.lat)
assert np.all(s2_0101.obstime == self.s2.obstime[item])
assert np.may_share_memory(s2_0101.obstime.jd1, self.s2.obstime.jd1)
assert_array_equal(s2_0101.obsgeoloc.xyz, self.s2.obsgeoloc[item].xyz)
s3_0101 = self.s3[item]
assert s3_0101.shape == (1, 1)
assert s3_0101.obstime.shape == (1, 1)
assert np.all(s3_0101.obstime == self.s3.obstime[item])
assert np.may_share_memory(s3_0101.obstime.jd1, self.s3.obstime.jd1)
assert_array_equal(s3_0101.obsgeoloc.xyz, self.s3.obsgeoloc[item].xyz)
sc_0101 = self.sc[item]
assert sc_0101.shape == (1, 1)
assert_array_equal(sc_0101.data.lon, self.sc.data.lon[item])
assert np.may_share_memory(sc_0101.data.lat, self.sc.data.lat)
assert np.all(sc_0101.obstime == self.sc.obstime[item])
assert np.may_share_memory(sc_0101.obstime.jd1, self.sc.obstime.jd1)
assert_array_equal(sc_0101.obsgeoloc.xyz, self.sc.obsgeoloc[item].xyz)
def test_ravel(self):
s0_ravel = self.s0.ravel()
assert s0_ravel.shape == (self.s0.size,)
assert np.all(s0_ravel.data.lon == self.s0.data.lon.ravel())
assert np.may_share_memory(s0_ravel.data.lon, self.s0.data.lon)
assert np.may_share_memory(s0_ravel.data.lat, self.s0.data.lat)
# Since s1 lon, lat were broadcast, ravel needs to make a copy.
s1_ravel = self.s1.ravel()
assert s1_ravel.shape == (self.s1.size,)
assert np.all(s1_ravel.data.lon == self.s1.data.lon.ravel())
assert not np.may_share_memory(s1_ravel.data.lat, self.s1.data.lat)
assert np.all(s1_ravel.obstime == self.s1.obstime.ravel())
assert not np.may_share_memory(s1_ravel.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_ravel.location == self.s1.location.ravel())
assert not np.may_share_memory(s1_ravel.location, self.s1.location)
assert np.all(s1_ravel.temperature == self.s1.temperature.ravel())
assert np.may_share_memory(s1_ravel.temperature, self.s1.temperature)
assert s1_ravel.pressure == self.s1.pressure
s2_ravel = self.s2.ravel()
assert s2_ravel.shape == (self.s2.size,)
assert np.all(s2_ravel.data.lon == self.s2.data.lon.ravel())
assert not np.may_share_memory(s2_ravel.data.lat, self.s2.data.lat)
assert np.all(s2_ravel.obstime == self.s2.obstime.ravel())
assert not np.may_share_memory(s2_ravel.obstime.jd1, self.s2.obstime.jd1)
        # CartesianRepresentation does not allow direct comparisons, as this is
        # too tricky to get right in the face of rounding issues. Here, though,
        # it cannot be an issue, so we compare the xyz quantities.
assert np.all(s2_ravel.obsgeoloc.xyz == self.s2.obsgeoloc.ravel().xyz)
assert not np.may_share_memory(s2_ravel.obsgeoloc.x, self.s2.obsgeoloc.x)
s3_ravel = self.s3.ravel()
assert s3_ravel.shape == (42,) # cannot use .size on frame w/o data.
assert np.all(s3_ravel.obstime == self.s3.obstime.ravel())
assert not np.may_share_memory(s3_ravel.obstime.jd1, self.s3.obstime.jd1)
assert np.all(s3_ravel.obsgeoloc.xyz == self.s3.obsgeoloc.ravel().xyz)
assert not np.may_share_memory(s3_ravel.obsgeoloc.x, self.s3.obsgeoloc.x)
sc_ravel = self.sc.ravel()
assert sc_ravel.shape == (self.sc.size,)
assert np.all(sc_ravel.data.lon == self.sc.data.lon.ravel())
assert not np.may_share_memory(sc_ravel.data.lat, self.sc.data.lat)
assert np.all(sc_ravel.obstime == self.sc.obstime.ravel())
assert not np.may_share_memory(sc_ravel.obstime.jd1, self.sc.obstime.jd1)
assert np.all(sc_ravel.obsgeoloc.xyz == self.sc.obsgeoloc.ravel().xyz)
assert not np.may_share_memory(sc_ravel.obsgeoloc.x, self.sc.obsgeoloc.x)
def test_flatten(self):
s0_flatten = self.s0.flatten()
assert s0_flatten.shape == (self.s0.size,)
assert np.all(s0_flatten.data.lon == self.s0.data.lon.flatten())
# Flatten always copies.
assert not np.may_share_memory(s0_flatten.data.lat, self.s0.data.lat)
s1_flatten = self.s1.flatten()
assert s1_flatten.shape == (self.s1.size,)
assert np.all(s1_flatten.data.lat == self.s1.data.lat.flatten())
        assert not np.may_share_memory(s1_flatten.data.lon, self.s1.data.lon)
assert np.all(s1_flatten.obstime == self.s1.obstime.flatten())
assert not np.may_share_memory(s1_flatten.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_flatten.location == self.s1.location.flatten())
assert not np.may_share_memory(s1_flatten.location, self.s1.location)
assert np.all(s1_flatten.temperature == self.s1.temperature.flatten())
assert not np.may_share_memory(s1_flatten.temperature, self.s1.temperature)
assert s1_flatten.pressure == self.s1.pressure
def test_transpose(self):
s0_transpose = self.s0.transpose()
assert s0_transpose.shape == (7, 6)
assert np.all(s0_transpose.data.lon == self.s0.data.lon.transpose())
assert np.may_share_memory(s0_transpose.data.lat, self.s0.data.lat)
s1_transpose = self.s1.transpose()
assert s1_transpose.shape == (7, 6)
assert np.all(s1_transpose.data.lat == self.s1.data.lat.transpose())
assert np.may_share_memory(s1_transpose.data.lon, self.s1.data.lon)
assert np.all(s1_transpose.obstime == self.s1.obstime.transpose())
assert np.may_share_memory(s1_transpose.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_transpose.location == self.s1.location.transpose())
assert np.may_share_memory(s1_transpose.location, self.s1.location)
assert np.all(s1_transpose.temperature == self.s1.temperature.transpose())
assert np.may_share_memory(s1_transpose.temperature, self.s1.temperature)
assert s1_transpose.pressure == self.s1.pressure
# Only one check on T, since it just calls transpose anyway.
s1_T = self.s1.T
assert s1_T.shape == (7, 6)
assert np.all(s1_T.temperature == self.s1.temperature.T)
assert np.may_share_memory(s1_T.location, self.s1.location)
def test_diagonal(self):
s0_diagonal = self.s0.diagonal()
assert s0_diagonal.shape == (6,)
assert np.all(s0_diagonal.data.lat == self.s0.data.lat.diagonal())
assert np.may_share_memory(s0_diagonal.data.lat, self.s0.data.lat)
def test_swapaxes(self):
s1_swapaxes = self.s1.swapaxes(0, 1)
assert s1_swapaxes.shape == (7, 6)
assert np.all(s1_swapaxes.data.lat == self.s1.data.lat.swapaxes(0, 1))
assert np.may_share_memory(s1_swapaxes.data.lat, self.s1.data.lat)
assert np.all(s1_swapaxes.obstime == self.s1.obstime.swapaxes(0, 1))
assert np.may_share_memory(s1_swapaxes.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_swapaxes.location == self.s1.location.swapaxes(0, 1))
assert s1_swapaxes.location.shape == (7, 6)
assert np.may_share_memory(s1_swapaxes.location, self.s1.location)
assert np.all(s1_swapaxes.temperature == self.s1.temperature.swapaxes(0, 1))
assert np.may_share_memory(s1_swapaxes.temperature, self.s1.temperature)
assert s1_swapaxes.pressure == self.s1.pressure
def test_reshape(self):
s0_reshape = self.s0.reshape(2, 3, 7)
assert s0_reshape.shape == (2, 3, 7)
assert np.all(s0_reshape.data.lon == self.s0.data.lon.reshape(2, 3, 7))
assert np.all(s0_reshape.data.lat == self.s0.data.lat.reshape(2, 3, 7))
assert np.may_share_memory(s0_reshape.data.lon, self.s0.data.lon)
assert np.may_share_memory(s0_reshape.data.lat, self.s0.data.lat)
s1_reshape = self.s1.reshape(3, 2, 7)
assert s1_reshape.shape == (3, 2, 7)
assert np.all(s1_reshape.data.lat == self.s1.data.lat.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.data.lat, self.s1.data.lat)
assert np.all(s1_reshape.obstime == self.s1.obstime.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_reshape.location == self.s1.location.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.location, self.s1.location)
assert np.all(s1_reshape.temperature == self.s1.temperature.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.temperature, self.s1.temperature)
assert s1_reshape.pressure == self.s1.pressure
# For reshape(3, 14), copying is necessary for lon, lat, location, time
s1_reshape2 = self.s1.reshape(3, 14)
assert s1_reshape2.shape == (3, 14)
assert np.all(s1_reshape2.data.lon == self.s1.data.lon.reshape(3, 14))
assert not np.may_share_memory(s1_reshape2.data.lon, self.s1.data.lon)
assert np.all(s1_reshape2.obstime == self.s1.obstime.reshape(3, 14))
assert not np.may_share_memory(s1_reshape2.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_reshape2.location == self.s1.location.reshape(3, 14))
assert not np.may_share_memory(s1_reshape2.location, self.s1.location)
assert np.all(s1_reshape2.temperature == self.s1.temperature.reshape(3, 14))
assert np.may_share_memory(s1_reshape2.temperature, self.s1.temperature)
assert s1_reshape2.pressure == self.s1.pressure
s2_reshape = self.s2.reshape(3, 2, 7)
assert s2_reshape.shape == (3, 2, 7)
assert np.all(s2_reshape.data.lon == self.s2.data.lon.reshape(3, 2, 7))
assert np.may_share_memory(s2_reshape.data.lat, self.s2.data.lat)
assert np.all(s2_reshape.obstime == self.s2.obstime.reshape(3, 2, 7))
assert np.may_share_memory(s2_reshape.obstime.jd1, self.s2.obstime.jd1)
assert np.all(
s2_reshape.obsgeoloc.xyz == self.s2.obsgeoloc.reshape(3, 2, 7).xyz
)
assert np.may_share_memory(s2_reshape.obsgeoloc.x, self.s2.obsgeoloc.x)
s3_reshape = self.s3.reshape(3, 2, 7)
assert s3_reshape.shape == (3, 2, 7)
assert np.all(s3_reshape.obstime == self.s3.obstime.reshape(3, 2, 7))
assert np.may_share_memory(s3_reshape.obstime.jd1, self.s3.obstime.jd1)
assert np.all(
s3_reshape.obsgeoloc.xyz == self.s3.obsgeoloc.reshape(3, 2, 7).xyz
)
assert np.may_share_memory(s3_reshape.obsgeoloc.x, self.s3.obsgeoloc.x)
sc_reshape = self.sc.reshape(3, 2, 7)
assert sc_reshape.shape == (3, 2, 7)
assert np.all(sc_reshape.data.lon == self.sc.data.lon.reshape(3, 2, 7))
assert np.may_share_memory(sc_reshape.data.lat, self.sc.data.lat)
assert np.all(sc_reshape.obstime == self.sc.obstime.reshape(3, 2, 7))
assert np.may_share_memory(sc_reshape.obstime.jd1, self.sc.obstime.jd1)
assert np.all(
sc_reshape.obsgeoloc.xyz == self.sc.obsgeoloc.reshape(3, 2, 7).xyz
)
assert np.may_share_memory(sc_reshape.obsgeoloc.x, self.sc.obsgeoloc.x)
# For reshape(3, 14), the arrays all need to be copied.
sc_reshape2 = self.sc.reshape(3, 14)
assert sc_reshape2.shape == (3, 14)
assert np.all(sc_reshape2.data.lon == self.sc.data.lon.reshape(3, 14))
assert not np.may_share_memory(sc_reshape2.data.lat, self.sc.data.lat)
assert np.all(sc_reshape2.obstime == self.sc.obstime.reshape(3, 14))
assert not np.may_share_memory(sc_reshape2.obstime.jd1, self.sc.obstime.jd1)
assert np.all(sc_reshape2.obsgeoloc.xyz == self.sc.obsgeoloc.reshape(3, 14).xyz)
assert not np.may_share_memory(sc_reshape2.obsgeoloc.x, self.sc.obsgeoloc.x)
def test_squeeze(self):
s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze()
assert s0_squeeze.shape == (3, 2, 7)
assert np.all(s0_squeeze.data.lat == self.s0.data.lat.reshape(3, 2, 7))
assert np.may_share_memory(s0_squeeze.data.lat, self.s0.data.lat)
def test_add_dimension(self, method):
if method:
s0_adddim = self.s0[:, np.newaxis, :]
else:
s0_adddim = np.expand_dims(self.s0, 1)
assert s0_adddim.shape == (6, 1, 7)
assert np.all(s0_adddim.data.lon == self.s0.data.lon[:, np.newaxis, :])
assert np.may_share_memory(s0_adddim.data.lat, self.s0.data.lat)
def test_take(self):
s0_take = self.s0.take((5, 2))
assert s0_take.shape == (2,)
assert np.all(s0_take.data.lon == self.s0.data.lon.take((5, 2)))
# Much more detailed tests of shape manipulation via numpy functions done
# in test_representation_methods.
@needs_array_function
def test_broadcast_to(self):
s1_broadcast = np.broadcast_to(self.s1, (20, 6, 7))
assert s1_broadcast.shape == (20, 6, 7)
assert np.all(s1_broadcast.data.lon == self.s1.data.lon[np.newaxis])
assert np.may_share_memory(s1_broadcast.data.lat, self.s1.data.lat)
|
c5fa21f4577562677a9559e3584ab257ccdf8da044f8ffb5af4c9ab59a7817ca | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import builtin_frames as bf
from astropy.coordinates import galactocentric_frame_defaults
from astropy.coordinates import representation as r
from astropy.coordinates.builtin_frames import CIRS, ICRS, Galactic, Galactocentric
from astropy.coordinates.errors import ConvertError
from astropy.units import allclose as quantity_allclose
def test_api():
# transform observed Barycentric velocities to full-space Galactocentric
with galactocentric_frame_defaults.set("latest"):
gc_frame = Galactocentric()
icrs = ICRS(
ra=151.0 * u.deg,
dec=-16 * u.deg,
distance=101 * u.pc,
pm_ra_cosdec=21 * u.mas / u.yr,
pm_dec=-71 * u.mas / u.yr,
radial_velocity=71 * u.km / u.s,
)
icrs.transform_to(gc_frame)
# transform a set of ICRS proper motions to Galactic
icrs = ICRS(
ra=151.0 * u.deg,
dec=-16 * u.deg,
pm_ra_cosdec=21 * u.mas / u.yr,
pm_dec=-71 * u.mas / u.yr,
)
icrs.transform_to(Galactic())
# transform a Barycentric RV to a GSR RV
icrs = ICRS(
ra=151.0 * u.deg,
dec=-16 * u.deg,
distance=1.0 * u.pc,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=71 * u.km / u.s,
)
icrs.transform_to(Galactocentric())
# fmt: off
all_kwargs = [
dict(ra=37.4*u.deg, dec=-55.8*u.deg),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
radial_velocity=105.7*u.km/u.s),
dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s),
# Now test other representation/differential types:
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
representation_type='cartesian'),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
representation_type=r.CartesianRepresentation),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential),
dict(x=100.*u.pc, y=200*u.pc, z=300*u.pc,
v_x=100.*u.km/u.s, v_y=200*u.km/u.s, v_z=300*u.km/u.s,
representation_type=r.CartesianRepresentation,
differential_type='cartesian'),
]
# fmt: on
@pytest.mark.parametrize("kwargs", all_kwargs)
def test_all_arg_options(kwargs):
    # Above is a list of all possible valid combinations of arguments.
    # Here we simply verify that, having passed them in, we can access the
    # relevant attributes on the resulting object
icrs = ICRS(**kwargs)
gal = icrs.transform_to(Galactic())
repr_gal = repr(gal)
for k in kwargs:
if k == "differential_type":
continue
getattr(icrs, k)
if "pm_ra_cosdec" in kwargs: # should have both
assert "pm_l_cosb" in repr_gal
assert "pm_b" in repr_gal
assert "mas / yr" in repr_gal
if "radial_velocity" not in kwargs:
assert "radial_velocity" not in repr_gal
if "radial_velocity" in kwargs:
assert "radial_velocity" in repr_gal
assert "km / s" in repr_gal
if "pm_ra_cosdec" not in kwargs:
assert "pm_l_cosb" not in repr_gal
assert "pm_b" not in repr_gal
@pytest.mark.parametrize(
"cls,lon,lat",
[
[bf.ICRS, "ra", "dec"],
[bf.FK4, "ra", "dec"],
[bf.FK4NoETerms, "ra", "dec"],
[bf.FK5, "ra", "dec"],
[bf.GCRS, "ra", "dec"],
[bf.HCRS, "ra", "dec"],
[bf.LSR, "ra", "dec"],
[bf.CIRS, "ra", "dec"],
[bf.Galactic, "l", "b"],
[bf.AltAz, "az", "alt"],
[bf.Supergalactic, "sgl", "sgb"],
[bf.GalacticLSR, "l", "b"],
[bf.HeliocentricMeanEcliptic, "lon", "lat"],
[bf.GeocentricMeanEcliptic, "lon", "lat"],
[bf.BarycentricMeanEcliptic, "lon", "lat"],
[bf.PrecessedGeocentric, "ra", "dec"],
],
)
def test_expected_arg_names(cls, lon, lat):
kwargs = {
lon: 37.4 * u.deg,
lat: -55.8 * u.deg,
"distance": 150 * u.pc,
f"pm_{lon}_cos{lat}": -21.2 * u.mas / u.yr,
f"pm_{lat}": 17.1 * u.mas / u.yr,
"radial_velocity": 105.7 * u.km / u.s,
}
frame = cls(**kwargs)
# these data are extracted from the vizier copy of XHIP:
# http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
_xhip_head = """
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
R D pmRA pmDE Di pmGLon pmGLat RV U V W
HIP AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg) GLat (deg) st (pc) (mas/yr) (mas/yr) (km/s) (km/s) (km/s) (km/s)
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
"""[
1:-1
]
_xhip_data = """
19 000.05331690 +38.30408633 -3.17 -15.37 112.00026470 -23.47789171 247.12 -6.40 -14.33 6.30 7.3 2.0 -17.9
20 000.06295067 +23.52928427 36.11 -22.48 108.02779304 -37.85659811 95.90 29.35 -30.78 37.80 -19.3 16.1 -34.2
21 000.06623581 +08.00723430 61.48 -0.23 101.69697120 -52.74179515 183.68 58.06 -20.23 -11.72 -45.2 -30.9 -1.3
24917 080.09698238 -33.39874984 -4.30 13.40 236.92324669 -32.58047131 107.38 -14.03 -1.15 36.10 -22.4 -21.3 -19.9
59207 182.13915108 +65.34963517 18.17 5.49 130.04157185 51.18258601 56.00 -18.98 -0.49 5.70 1.5 6.1 4.4
87992 269.60730667 +36.87462906 -89.58 72.46 62.98053142 25.90148234 129.60 45.64 105.79 -4.00 -39.5 -15.8 56.7
115110 349.72322473 -28.74087144 48.86 -9.25 23.00447250 -69.52799804 116.87 -8.37 -49.02 15.00 -16.8 -12.2 -23.6
"""[
1:-1
]
# in principle we could parse the above as a table, but doing it "manually"
# makes this test less tied to Table working correctly
@pytest.mark.parametrize(
"hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W",
[[float(val) for val in row.split()] for row in _xhip_data.split("\n")],
)
def test_xhip_galactic(
hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon, pmglat, rv, U, V, W
):
i = ICRS(
ra * u.deg,
dec * u.deg,
dist * u.pc,
pm_ra_cosdec=pmra * u.marcsec / u.yr,
pm_dec=pmdec * u.marcsec / u.yr,
radial_velocity=rv * u.km / u.s,
)
g = i.transform_to(Galactic())
    # precision is limited by the 2-decimal-digit string representation of the pms
assert quantity_allclose(
g.pm_l_cosb, pmglon * u.marcsec / u.yr, atol=0.01 * u.marcsec / u.yr
)
assert quantity_allclose(
g.pm_b, pmglat * u.marcsec / u.yr, atol=0.01 * u.marcsec / u.yr
)
# make sure UVW also makes sense
uvwg = g.cartesian.differentials["s"]
    # precision is limited by the 1-decimal-digit string representation of the velocities
assert quantity_allclose(uvwg.d_x, U * u.km / u.s, atol=0.1 * u.km / u.s)
assert quantity_allclose(uvwg.d_y, V * u.km / u.s, atol=0.1 * u.km / u.s)
assert quantity_allclose(uvwg.d_z, W * u.km / u.s, atol=0.1 * u.km / u.s)
# fmt: off
@pytest.mark.parametrize('kwargs,expect_success', [
[dict(ra=37.4*u.deg, dec=-55.8*u.deg), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc), True],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
radial_velocity=105.7*u.km/u.s), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg,
radial_velocity=105.7*u.km/u.s,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False],
[dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
radial_velocity=105.7*u.km/u.s), True]
])
# fmt: on
def test_frame_affinetransform(kwargs, expect_success):
"""There are already tests in test_transformations.py that check that
an AffineTransform fails without full-space data, but this just checks that
things work as expected at the frame level as well.
"""
with galactocentric_frame_defaults.set("latest"):
icrs = ICRS(**kwargs)
if expect_success:
_ = icrs.transform_to(Galactocentric())
else:
with pytest.raises(ConvertError):
icrs.transform_to(Galactocentric())
def test_differential_type_arg():
"""
Test passing in an explicit differential class to the initializer or
changing the differential class via set_representation_cls
"""
from astropy.coordinates.builtin_frames import ICRS
icrs = ICRS(
ra=1 * u.deg,
dec=60 * u.deg,
pm_ra=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
differential_type=r.UnitSphericalDifferential,
)
assert icrs.pm_ra == 10 * u.mas / u.yr
icrs = ICRS(
ra=1 * u.deg,
dec=60 * u.deg,
pm_ra=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
differential_type={"s": r.UnitSphericalDifferential},
)
assert icrs.pm_ra == 10 * u.mas / u.yr
icrs = ICRS(
ra=1 * u.deg,
dec=60 * u.deg,
pm_ra_cosdec=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
)
icrs.set_representation_cls(s=r.UnitSphericalDifferential)
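    # pm_ra (without the cos(dec) factor) = pm_ra_cosdec / cos(dec)
    #                                     = 10 / cos(60 deg) = 20 mas/yr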
assert quantity_allclose(icrs.pm_ra, 20 * u.mas / u.yr)
# incompatible representation and differential
with pytest.raises(TypeError):
ICRS(
ra=1 * u.deg,
dec=60 * u.deg,
v_x=1 * u.km / u.s,
v_y=-2 * u.km / u.s,
v_z=-2 * u.km / u.s,
differential_type=r.CartesianDifferential,
)
# specify both
icrs = ICRS(
x=1 * u.pc,
y=2 * u.pc,
z=3 * u.pc,
v_x=1 * u.km / u.s,
v_y=2 * u.km / u.s,
v_z=3 * u.km / u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential,
)
assert icrs.x == 1 * u.pc
assert icrs.y == 2 * u.pc
assert icrs.z == 3 * u.pc
assert icrs.v_x == 1 * u.km / u.s
assert icrs.v_y == 2 * u.km / u.s
assert icrs.v_z == 3 * u.km / u.s
def test_slicing_preserves_differential():
icrs = ICRS(
ra=37.4 * u.deg,
dec=-55.8 * u.deg,
distance=150 * u.pc,
pm_ra_cosdec=-21.2 * u.mas / u.yr,
pm_dec=17.1 * u.mas / u.yr,
radial_velocity=105.7 * u.km / u.s,
)
icrs2 = icrs.reshape(1, 1)[:1, 0]
for name in icrs.representation_component_names.keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
for name in icrs.get_representation_component_names("s").keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
def test_shorthand_attributes():
# Check that attribute access works
# for array data:
n = 4
icrs1 = ICRS(
ra=np.random.uniform(0, 360, n) * u.deg,
dec=np.random.uniform(-90, 90, n) * u.deg,
distance=100 * u.pc,
pm_ra_cosdec=np.random.normal(0, 100, n) * u.mas / u.yr,
pm_dec=np.random.normal(0, 100, n) * u.mas / u.yr,
radial_velocity=np.random.normal(0, 100, n) * u.km / u.s,
)
v = icrs1.velocity
pm = icrs1.proper_motion
assert quantity_allclose(pm[0], icrs1.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs1.pm_dec)
# for scalar data:
icrs2 = ICRS(
ra=37.4 * u.deg,
dec=-55.8 * u.deg,
distance=150 * u.pc,
pm_ra_cosdec=-21.2 * u.mas / u.yr,
pm_dec=17.1 * u.mas / u.yr,
radial_velocity=105.7 * u.km / u.s,
)
v = icrs2.velocity
pm = icrs2.proper_motion
assert quantity_allclose(pm[0], icrs2.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs2.pm_dec)
# check that it fails where we expect:
# no distance
rv = 105.7 * u.km / u.s
icrs3 = ICRS(
ra=37.4 * u.deg,
dec=-55.8 * u.deg,
pm_ra_cosdec=-21.2 * u.mas / u.yr,
pm_dec=17.1 * u.mas / u.yr,
radial_velocity=rv,
)
with pytest.raises(ValueError):
icrs3.velocity
icrs3.set_representation_cls("cartesian")
assert hasattr(icrs3, "radial_velocity")
assert quantity_allclose(icrs3.radial_velocity, rv)
icrs4 = ICRS(
x=30 * u.pc,
y=20 * u.pc,
z=11 * u.pc,
v_x=10 * u.km / u.s,
v_y=10 * u.km / u.s,
v_z=10 * u.km / u.s,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential,
)
icrs4.radial_velocity
def test_negative_distance():
"""Regression test: #7408
Make sure that negative parallaxes turned into distances are handled right
"""
RA = 150 * u.deg
DEC = -11 * u.deg
c = ICRS(
ra=RA,
dec=DEC,
distance=(-10 * u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=10 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
)
assert quantity_allclose(c.ra, RA)
assert quantity_allclose(c.dec, DEC)
c = ICRS(ra=RA, dec=DEC, distance=(-10 * u.mas).to(u.pc, u.parallax()))
assert quantity_allclose(c.ra, RA)
assert quantity_allclose(c.dec, DEC)
def test_velocity_units():
"""Check that the differential data given has compatible units
with the time-derivative of representation data"""
msg = (
'x has unit "" with physical type "dimensionless", but v_x has '
'incompatible unit "" with physical type "dimensionless" instead '
r'of the expected "frequency"\.'
)
with pytest.raises(ValueError, match=msg):
c = ICRS(
x=1,
y=2,
z=3,
v_x=1,
v_y=2,
v_z=3,
representation_type=r.CartesianRepresentation,
differential_type=r.CartesianDifferential,
)
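# For contrast, a minimal sketch (values arbitrary) of the same construction
# with dimensionally consistent units, which is expected to succeed:
#
#     ICRS(
#         x=1 * u.pc, y=2 * u.pc, z=3 * u.pc,
#         v_x=1 * u.km / u.s, v_y=2 * u.km / u.s, v_z=3 * u.km / u.s,
#         representation_type=r.CartesianRepresentation,
#         differential_type=r.CartesianDifferential,
#     )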
def test_frame_with_velocity_without_distance_can_be_transformed():
frame = CIRS(
1 * u.deg, 2 * u.deg, pm_dec=1 * u.mas / u.yr, pm_ra_cosdec=2 * u.mas / u.yr
)
rep = frame.transform_to(ICRS())
assert "<ICRS Coordinate: (ra, dec, distance) in" in repr(rep)
|
137956f2c73d0d8a5856c4576bf546dfaded3381126fa0e868cfdbdcdb11ae5f | """
Tests the Angle string formatting capabilities. SkyCoord formatting is in
test_sky_coord
"""
import pytest
from astropy import units as u
from astropy.coordinates.angles import Angle
def test_to_string_precision():
# There are already some tests in test_api.py, but this is a regression
# test for the bug in issue #1319 which caused incorrect formatting of the
# seconds for precision=0
angle = Angle(-1.23456789, unit=u.degree)
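    # -1.23456789 deg == -(1d 14m 4.4444...s), so ``precision`` here only
    # affects the number of digits shown in the seconds field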
assert angle.to_string(precision=3) == "-1d14m04.444s"
assert angle.to_string(precision=1) == "-1d14m04.4s"
assert angle.to_string(precision=0) == "-1d14m04s"
angle2 = Angle(-1.23456789, unit=u.hourangle)
assert angle2.to_string(precision=3, unit=u.hour) == "-1h14m04.444s"
assert angle2.to_string(precision=1, unit=u.hour) == "-1h14m04.4s"
assert angle2.to_string(precision=0, unit=u.hour) == "-1h14m04s"
# Regression test for #7141
angle3 = Angle(-0.5, unit=u.degree)
assert angle3.to_string(precision=0, fields=3) == "-0d30m00s"
assert angle3.to_string(precision=0, fields=2) == "-0d30m"
assert angle3.to_string(precision=0, fields=1) == "-1d"
def test_to_string_decimal():
# There are already some tests in test_api.py, but this is a regression
# test for the bug in issue #1323 which caused decimal formatting to not
# work
angle1 = Angle(2.0, unit=u.degree)
assert angle1.to_string(decimal=True, precision=3) == "2.000"
assert angle1.to_string(decimal=True, precision=1) == "2.0"
assert angle1.to_string(decimal=True, precision=0) == "2"
angle2 = Angle(3.0, unit=u.hourangle)
assert angle2.to_string(decimal=True, precision=3) == "3.000"
assert angle2.to_string(decimal=True, precision=1) == "3.0"
assert angle2.to_string(decimal=True, precision=0) == "3"
angle3 = Angle(4.0, unit=u.radian)
assert angle3.to_string(decimal=True, precision=3) == "4.000"
assert angle3.to_string(decimal=True, precision=1) == "4.0"
assert angle3.to_string(decimal=True, precision=0) == "4"
with pytest.raises(ValueError, match="sexagesimal notation"):
angle3.to_string(decimal=True, sep="abc")
def test_to_string_formats():
a = Angle(1.113355, unit=u.deg)
latex_str = r"$1^\circ06{}^\prime48.078{}^{\prime\prime}$"
assert a.to_string(format="latex") == latex_str
assert a.to_string(format="latex_inline") == latex_str
assert a.to_string(format="unicode") == "1°06′48.078″"
a = Angle(1.113355, unit=u.hour)
latex_str = r"$1^{\mathrm{h}}06^{\mathrm{m}}48.078^{\mathrm{s}}$"
assert a.to_string(format="latex") == latex_str
assert a.to_string(format="latex_inline") == latex_str
assert a.to_string(format="unicode") == "1ʰ06ᵐ48.078ˢ"
a = Angle(1.113355, unit=u.radian)
assert a.to_string(format="latex") == r"$1.11336\mathrm{rad}$"
assert a.to_string(format="latex_inline") == r"$1.11336\mathrm{rad}$"
assert a.to_string(format="unicode") == "1.11336rad"
def test_to_string_decimal_formats():
angle1 = Angle(2.0, unit=u.degree)
assert angle1.to_string(decimal=True, format="generic") == "2deg"
assert angle1.to_string(decimal=True, format="latex") == "$2\\mathrm{{}^{\\circ}}$"
assert angle1.to_string(decimal=True, format="unicode") == "2°"
angle2 = Angle(3.0, unit=u.hourangle)
assert angle2.to_string(decimal=True, format="generic") == "3hourangle"
assert angle2.to_string(decimal=True, format="latex") == "$3\\mathrm{{}^{h}}$"
assert angle2.to_string(decimal=True, format="unicode") == "3ʰ"
angle3 = Angle(4.0, unit=u.radian)
assert angle3.to_string(decimal=True, format="generic") == "4rad"
assert angle3.to_string(decimal=True, format="latex") == "$4\\mathrm{rad}$"
assert angle3.to_string(decimal=True, format="unicode") == "4rad"
with pytest.raises(ValueError, match="Unknown format"):
angle3.to_string(decimal=True, format="myformat")
def test_to_string_fields():
a = Angle(1.113355, unit=u.deg)
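    # 1.113355 deg == 1d 06m 48.078s; with fewer fields the last displayed
    # field is rounded (hence 07m once the seconds are dropped)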
assert a.to_string(fields=1) == r"1d"
assert a.to_string(fields=2) == r"1d07m"
assert a.to_string(fields=3) == r"1d06m48.078s"
def test_to_string_padding():
a = Angle(0.5653, unit=u.deg)
assert a.to_string(unit="deg", sep=":", pad=True) == r"00:33:55.08"
# Test to make sure negative angles are padded correctly
a = Angle(-0.5653, unit=u.deg)
assert a.to_string(unit="deg", sep=":", pad=True) == r"-00:33:55.08"
def test_sexagesimal_rounding_up():
a = Angle(359.999999999999, unit=u.deg)
assert a.to_string(precision=None) == "360d00m00s"
assert a.to_string(precision=4) == "360d00m00.0000s"
assert a.to_string(precision=5) == "360d00m00.00000s"
assert a.to_string(precision=6) == "360d00m00.000000s"
assert a.to_string(precision=7) == "360d00m00.0000000s"
assert a.to_string(precision=8) == "360d00m00.00000000s"
assert a.to_string(precision=9) == "359d59m59.999999996s"
a = Angle(3.999999, unit=u.deg)
assert a.to_string(fields=2, precision=None) == "4d00m"
assert a.to_string(fields=2, precision=1) == "4d00m"
assert a.to_string(fields=2, precision=5) == "4d00m"
assert a.to_string(fields=1, precision=1) == "4d"
assert a.to_string(fields=1, precision=5) == "4d"
def test_to_string_scalar():
a = Angle(1.113355, unit=u.deg)
assert isinstance(a.to_string(), str)
def test_to_string_radian_with_precision():
"""
Regression test for a bug that caused ``to_string`` to crash for angles in
radians when specifying the precision.
"""
# Check that specifying the precision works
a = Angle(3.0, unit=u.rad)
assert a.to_string(precision=3, sep="fromunit") == "3.000rad"
def test_sexagesimal_round_down():
a1 = Angle(1, u.deg).to(u.hourangle)
a2 = Angle(2, u.deg)
assert a1.to_string() == "0h04m00s"
assert a2.to_string() == "2d00m00s"
def test_to_string_fields_colon():
a = Angle(1.113355, unit=u.deg)
assert a.to_string(fields=2, sep=":") == "1:07"
assert a.to_string(fields=3, sep=":") == "1:06:48.078"
assert a.to_string(fields=1, sep=":") == "1"
|
4acb51d5cdbf34d8274816412c24b931864444090d48f14b98829bd0e9eabc58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.coordinates.builtin_frames import (
FK5,
ICRS,
AltAz,
Galactic,
SkyOffsetFrame,
)
from astropy.coordinates.distances import Distance
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
def test_altaz_attribute_transforms():
"""Test transforms between AltAz frames with different attributes."""
el1 = EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
origin1 = AltAz(
0 * u.deg, 0 * u.deg, obstime=Time("2000-01-01T12:00:00"), location=el1
)
frame1 = SkyOffsetFrame(origin=origin1)
coo1 = SkyCoord(1 * u.deg, 1 * u.deg, frame=frame1)
el2 = EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
origin2 = AltAz(
0 * u.deg, 0 * u.deg, obstime=Time("2000-01-01T11:00:00"), location=el2
)
frame2 = SkyOffsetFrame(origin=origin2)
coo2 = coo1.transform_to(frame2)
coo2_expected = [1.22522446, 0.70624298] * u.deg
assert_allclose(
[coo2.lon.wrap_at(180 * u.deg), coo2.lat], coo2_expected, atol=convert_precision
)
el3 = EarthLocation(0 * u.deg, 90 * u.deg, 0 * u.m)
origin3 = AltAz(
0 * u.deg, 90 * u.deg, obstime=Time("2000-01-01T12:00:00"), location=el3
)
frame3 = SkyOffsetFrame(origin=origin3)
coo3 = coo2.transform_to(frame3)
assert_allclose(
[coo3.lon.wrap_at(180 * u.deg), coo3.lat],
[1 * u.deg, 1 * u.deg],
atol=convert_precision,
)
@pytest.mark.parametrize(
"inradec,expectedlatlon, tolsep",
[
((45, 45) * u.deg, (0, 0) * u.deg, 0.001 * u.arcsec),
((45, 0) * u.deg, (0, -45) * u.deg, 0.001 * u.arcsec),
((45, 90) * u.deg, (0, 45) * u.deg, 0.001 * u.arcsec),
((46, 45) * u.deg, (1 * np.cos(45 * u.deg), 0) * u.deg, 16 * u.arcsec),
],
)
def test_skyoffset(inradec, expectedlatlon, tolsep, originradec=(45, 45) * u.deg):
origin = ICRS(*originradec)
skyoffset_frame = SkyOffsetFrame(origin=origin)
skycoord = SkyCoord(*inradec, frame=ICRS)
skycoord_inaf = skycoord.transform_to(skyoffset_frame)
assert hasattr(skycoord_inaf, "lon")
assert hasattr(skycoord_inaf, "lat")
expected = SkyCoord(*expectedlatlon, frame=skyoffset_frame)
assert skycoord_inaf.separation(expected) < tolsep
# Check we can also transform back (regression test for gh-11254).
roundtrip = skycoord_inaf.transform_to(ICRS())
assert roundtrip.separation(skycoord) < 1 * u.uas
def test_skyoffset_functional_ra():
    # we trim the endpoints of the linspace grids (the [1:-1]) because machine
    # precision issues sometimes lead to results that are either ~0 or ~360,
    # which mucks up the final comparison and leads to spurious failures, so
    # we just stay away from the edges
input_ra = np.linspace(0, 360, 12)[1:-1]
input_dec = np.linspace(-90, 90, 12)[1:-1]
icrs_coord = ICRS(ra=input_ra * u.deg, dec=input_dec * u.deg, distance=1.0 * u.kpc)
for ra in np.linspace(0, 360, 24):
# expected rotation
expected = ICRS(
ra=np.linspace(0 - ra, 360 - ra, 12)[1:-1] * u.deg,
dec=np.linspace(-90, 90, 12)[1:-1] * u.deg,
distance=1.0 * u.kpc,
)
expected_xyz = expected.cartesian.xyz
# actual transformation to the frame
skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra * u.deg, 0 * u.deg))
actual = icrs_coord.transform_to(skyoffset_frame)
actual_xyz = actual.cartesian.xyz
# back to ICRS
roundtrip = actual.transform_to(ICRS())
roundtrip_xyz = roundtrip.cartesian.xyz
# Verify
assert_allclose(actual_xyz, expected_xyz, atol=1e-5 * u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1e-5 * u.kpc)
def test_skyoffset_functional_dec():
    # we trim the endpoints of the linspace grids (the [1:-1]) because machine
    # precision issues sometimes lead to results that are either ~0 or ~360,
    # which mucks up the final comparison and leads to spurious failures, so
    # we just stay away from the edges
input_ra = np.linspace(0, 360, 12)[1:-1]
input_dec = np.linspace(-90, 90, 12)[1:-1]
input_ra_rad = np.deg2rad(input_ra)
input_dec_rad = np.deg2rad(input_dec)
icrs_coord = ICRS(ra=input_ra * u.deg, dec=input_dec * u.deg, distance=1.0 * u.kpc)
# Dec rotations
# Done in xyz space because dec must be [-90,90]
for dec in np.linspace(-90, 90, 13):
# expected rotation
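        # An offset frame about an origin at (ra=0, dec) is equivalent to
        # rotating the Cartesian vector about the y-axis by the origin's
        # declination; the components below write that rotation out explicitly.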
dec_rad = -np.deg2rad(dec)
# fmt: off
expected_x = (-np.sin(input_dec_rad) * np.sin(dec_rad) +
np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad))
expected_y = (np.sin(input_ra_rad) * np.cos(input_dec_rad))
expected_z = (np.sin(input_dec_rad) * np.cos(dec_rad) +
np.sin(dec_rad) * np.cos(input_ra_rad) * np.cos(input_dec_rad))
# fmt: on
expected = SkyCoord(
x=expected_x,
y=expected_y,
z=expected_z,
unit="kpc",
representation_type="cartesian",
)
expected_xyz = expected.cartesian.xyz
# actual transformation to the frame
skyoffset_frame = SkyOffsetFrame(origin=ICRS(0 * u.deg, dec * u.deg))
actual = icrs_coord.transform_to(skyoffset_frame)
actual_xyz = actual.cartesian.xyz
# back to ICRS
roundtrip = actual.transform_to(ICRS())
# Verify
assert_allclose(actual_xyz, expected_xyz, atol=1e-5 * u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1e-5 * u.kpc)
def test_skyoffset_functional_ra_dec():
    # we trim the endpoints of the linspace grids (the [1:-1]) because machine
    # precision issues sometimes lead to results that are either ~0 or ~360,
    # which mucks up the final comparison and leads to spurious failures, so
    # we just stay away from the edges
input_ra = np.linspace(0, 360, 12)[1:-1]
input_dec = np.linspace(-90, 90, 12)[1:-1]
input_ra_rad = np.deg2rad(input_ra)
input_dec_rad = np.deg2rad(input_dec)
icrs_coord = ICRS(ra=input_ra * u.deg, dec=input_dec * u.deg, distance=1.0 * u.kpc)
for ra in np.linspace(0, 360, 10):
for dec in np.linspace(-90, 90, 5):
# expected rotation
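            # Here the offset frame is equivalent to a rotation about the
            # z-axis by the origin's RA followed by one about the (new) y-axis
            # by its declination, written out component by component below.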
dec_rad = -np.deg2rad(dec)
ra_rad = np.deg2rad(ra)
# fmt: off
expected_x = (-np.sin(input_dec_rad) * np.sin(dec_rad) +
np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad) * np.cos(ra_rad) +
np.sin(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad) * np.sin(ra_rad))
expected_y = (np.sin(input_ra_rad) * np.cos(input_dec_rad) * np.cos(ra_rad) -
np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.sin(ra_rad))
expected_z = (np.sin(input_dec_rad) * np.cos(dec_rad) +
np.sin(dec_rad) * np.cos(ra_rad) * np.cos(input_ra_rad) * np.cos(input_dec_rad) +
np.sin(dec_rad) * np.sin(ra_rad) * np.sin(input_ra_rad) * np.cos(input_dec_rad))
            # fmt: on
            expected = SkyCoord(
                x=expected_x,
                y=expected_y,
                z=expected_z,
                unit="kpc",
                representation_type="cartesian",
            )
expected_xyz = expected.cartesian.xyz
# actual transformation to the frame
skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra * u.deg, dec * u.deg))
actual = icrs_coord.transform_to(skyoffset_frame)
actual_xyz = actual.cartesian.xyz
# back to ICRS
roundtrip = actual.transform_to(ICRS())
# Verify
assert_allclose(actual_xyz, expected_xyz, atol=1e-5 * u.kpc)
assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1e-4 * u.deg)
assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1e-5 * u.deg)
assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1e-5 * u.kpc)
def test_skycoord_skyoffset_frame():
m31 = SkyCoord(10.6847083, 41.26875, frame="icrs", unit=u.deg)
m33 = SkyCoord(23.4621, 30.6599417, frame="icrs", unit=u.deg)
m31_astro = m31.skyoffset_frame()
m31_in_m31 = m31.transform_to(m31_astro)
m33_in_m31 = m33.transform_to(m31_astro)
assert_allclose(
[m31_in_m31.lon, m31_in_m31.lat], [0, 0] * u.deg, atol=1e-10 * u.deg
)
assert_allclose(
[m33_in_m31.lon, m33_in_m31.lat], [11.13135175, -9.79084759] * u.deg
)
assert_allclose(
m33.separation(m31), np.hypot(m33_in_m31.lon, m33_in_m31.lat), atol=0.1 * u.deg
)
# used below in the next parametrized test
m31_sys = [ICRS, FK5, Galactic]
m31_coo = [
(10.6847929, 41.2690650),
(10.6847929, 41.2690650),
(121.1744050, -21.5729360),
]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc
m31_params = []
for i in range(len(m31_sys)):
for j in range(len(m31_sys)):
if i < j:
m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))
@pytest.mark.parametrize(("fromsys", "tosys", "fromcoo", "tocoo"), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED, via SkyOffsetFrames
"""
from_origin = fromsys(fromcoo[0] * u.deg, fromcoo[1] * u.deg, distance=m31_dist)
from_pos = SkyOffsetFrame(1 * u.deg, 1 * u.deg, origin=from_origin)
to_origin = tosys(tocoo[0] * u.deg, tocoo[1] * u.deg, distance=m31_dist)
to_astroframe = SkyOffsetFrame(origin=to_origin)
target_pos = from_pos.transform_to(to_astroframe)
assert_allclose(
to_origin.separation(target_pos),
np.hypot(from_pos.lon, from_pos.lat),
atol=convert_precision,
)
roundtrip_pos = target_pos.transform_to(from_pos)
assert_allclose(
[roundtrip_pos.lon.wrap_at(180 * u.deg), roundtrip_pos.lat],
[1.0 * u.deg, 1.0 * u.deg],
atol=convert_precision,
)
@pytest.mark.parametrize(
"rotation, expectedlatlon",
[
(0 * u.deg, [0, 1] * u.deg),
(180 * u.deg, [0, -1] * u.deg),
(90 * u.deg, [-1, 0] * u.deg),
(-90 * u.deg, [1, 0] * u.deg),
],
)
def test_rotation(rotation, expectedlatlon):
origin = ICRS(45 * u.deg, 45 * u.deg)
target = ICRS(45 * u.deg, 46 * u.deg)
aframe = SkyOffsetFrame(origin=origin, rotation=rotation)
trans = target.transform_to(aframe)
assert_allclose(
[trans.lon.wrap_at(180 * u.deg), trans.lat], expectedlatlon, atol=1e-10 * u.deg
)
@pytest.mark.parametrize(
"rotation, expectedlatlon",
[
(0 * u.deg, [0, 1] * u.deg),
(180 * u.deg, [0, -1] * u.deg),
(90 * u.deg, [-1, 0] * u.deg),
(-90 * u.deg, [1, 0] * u.deg),
],
)
def test_skycoord_skyoffset_frame_rotation(rotation, expectedlatlon):
"""Test if passing a rotation argument via SkyCoord works"""
origin = SkyCoord(45 * u.deg, 45 * u.deg)
target = SkyCoord(45 * u.deg, 46 * u.deg)
aframe = origin.skyoffset_frame(rotation=rotation)
trans = target.transform_to(aframe)
assert_allclose(
[trans.lon.wrap_at(180 * u.deg), trans.lat], expectedlatlon, atol=1e-10 * u.deg
)
def test_skyoffset_names():
origin1 = ICRS(45 * u.deg, 45 * u.deg)
aframe1 = SkyOffsetFrame(origin=origin1)
assert type(aframe1).__name__ == "SkyOffsetICRS"
origin2 = Galactic(45 * u.deg, 45 * u.deg)
aframe2 = SkyOffsetFrame(origin=origin2)
assert type(aframe2).__name__ == "SkyOffsetGalactic"
def test_skyoffset_origindata():
origin = ICRS()
with pytest.raises(ValueError):
SkyOffsetFrame(origin=origin)
def test_skyoffset_lonwrap():
origin = ICRS(45 * u.deg, 45 * u.deg)
sc = SkyCoord(190 * u.deg, -45 * u.deg, frame=SkyOffsetFrame(origin=origin))
assert sc.lon < 180 * u.deg
sc2 = SkyCoord(-10 * u.deg, -45 * u.deg, frame=SkyOffsetFrame(origin=origin))
assert sc2.lon < 180 * u.deg
sc3 = sc.realize_frame(sc.represent_as("cartesian"))
assert sc3.lon < 180 * u.deg
sc4 = sc2.realize_frame(sc2.represent_as("cartesian"))
assert sc4.lon < 180 * u.deg
def test_skyoffset_velocity():
c = ICRS(
ra=170.9 * u.deg,
dec=-78.4 * u.deg,
pm_ra_cosdec=74.4134 * u.mas / u.yr,
pm_dec=-93.2342 * u.mas / u.yr,
)
skyoffset_frame = SkyOffsetFrame(origin=c)
c_skyoffset = c.transform_to(skyoffset_frame)
assert_allclose(c_skyoffset.pm_lon_coslat, c.pm_ra_cosdec)
assert_allclose(c_skyoffset.pm_lat, c.pm_dec)
@pytest.mark.parametrize(
"rotation, expectedpmlonlat",
[
(0 * u.deg, [1, 2] * u.mas / u.yr),
(45 * u.deg, [-(2**-0.5), 3 * 2**-0.5] * u.mas / u.yr),
(90 * u.deg, [-2, 1] * u.mas / u.yr),
(180 * u.deg, [-1, -2] * u.mas / u.yr),
(-90 * u.deg, [2, -1] * u.mas / u.yr),
],
)
def test_skyoffset_velocity_rotation(rotation, expectedpmlonlat):
sc = SkyCoord(
ra=170.9 * u.deg,
dec=-78.4 * u.deg,
pm_ra_cosdec=1 * u.mas / u.yr,
pm_dec=2 * u.mas / u.yr,
)
c_skyoffset0 = sc.transform_to(sc.skyoffset_frame(rotation=rotation))
assert_allclose(c_skyoffset0.pm_lon_coslat, expectedpmlonlat[0])
assert_allclose(c_skyoffset0.pm_lat, expectedpmlonlat[1])
def test_skyoffset_two_frames_interfering():
"""Regression test for gh-11277, where it turned out that the
origin argument validation from one SkyOffsetFrame could interfere
with that of another.
Note that this example brought out a different bug than that at the
top of gh-11277, viz., that an attempt was made to set origin on a SkyCoord
    when it should just stay as part of the SkyOffsetFrame.
"""
# Example adapted from @bmerry's minimal example at
# https://github.com/astropy/astropy/issues/11277#issuecomment-825492335
altaz_frame = AltAz(
obstime=Time("2020-04-22T13:00:00Z"), location=EarthLocation(18, -30)
)
target = SkyCoord(alt=70 * u.deg, az=150 * u.deg, frame=altaz_frame)
dirs_altaz_offset = SkyCoord(
lon=[-0.02, 0.01, 0.0, 0.0, 0.0] * u.rad,
lat=[0.0, 0.2, 0.0, -0.3, 0.1] * u.rad,
frame=target.skyoffset_frame(),
)
dirs_altaz = dirs_altaz_offset.transform_to(altaz_frame)
dirs_icrs = dirs_altaz.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
# The line below was almost guaranteed to fail.
dirs_icrs.transform_to(target_icrs.skyoffset_frame())
|
1b52c7bde37a52c78c9b2da07a934b354862a8ac47faf1ff0464f740acf8eb37 | import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates.spectral_quantity import SpectralQuantity
from astropy.tests.helper import assert_quantity_allclose
SPECTRAL_UNITS = (u.GHz, u.micron, u.keV, (1 / u.nm).unit, u.km / u.s)
class TestSpectralQuantity:
@pytest.mark.parametrize("unit", SPECTRAL_UNITS)
def test_init_value(self, unit):
SpectralQuantity(1, unit=unit)
@pytest.mark.parametrize("unit", SPECTRAL_UNITS)
def test_init_quantity(self, unit):
SpectralQuantity(1 * unit)
@pytest.mark.parametrize("unit", SPECTRAL_UNITS)
def test_init_spectralquantity(self, unit):
SpectralQuantity(SpectralQuantity(1, unit=unit))
@pytest.mark.parametrize("unit", (u.kg, u.byte))
def test_init_invalid(self, unit):
with pytest.raises(
u.UnitsError, match="SpectralQuantity instances require units"
):
SpectralQuantity(1, unit=unit)
with pytest.raises(
u.UnitsError, match="SpectralQuantity instances require units"
):
SpectralQuantity(1 * unit)
@pytest.mark.parametrize(("unit1", "unit2"), zip(SPECTRAL_UNITS, SPECTRAL_UNITS))
def test_spectral_conversion(self, unit1, unit2):
sq1 = SpectralQuantity(1 * unit1)
sq2 = sq1.to(unit2)
sq3 = sq2.to(str(unit1)) # check that string units work
assert isinstance(sq2, SpectralQuantity)
assert isinstance(sq3, SpectralQuantity)
assert_quantity_allclose(sq1, sq3)
def test_doppler_conversion(self):
sq1 = SpectralQuantity(
1 * u.km / u.s, doppler_convention="optical", doppler_rest=500 * u.nm
)
sq2 = sq1.to(u.m / u.s)
assert_allclose(sq2.value, 1000)
sq3 = sq1.to(u.m / u.s, doppler_convention="radio")
assert_allclose(sq3.value, 999.996664)
sq4 = sq1.to(u.m / u.s, doppler_convention="relativistic")
assert_allclose(sq4.value, 999.998332)
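        # changing doppler_rest re-anchors the velocity scale: 1 km/s relative
        # to 500 nm corresponds to a wavelength of ~500.00167 nm, which in the
        # optical convention v = c * (lamb - lamb_rest) / lamb_rest is
        # ~60.97 km/s relative to a 499.9 nm rest wavelength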
sq5 = sq1.to(u.m / u.s, doppler_rest=499.9 * u.nm)
assert_allclose(sq5.value, 60970.685737)
val5 = sq1.to_value(u.m / u.s, doppler_rest=499.9 * u.nm)
assert_allclose(val5, 60970.685737)
def test_doppler_conversion_validation(self):
sq1 = SpectralQuantity(1 * u.GHz)
sq2 = SpectralQuantity(1 * u.km / u.s)
with pytest.raises(
ValueError,
match="doppler_convention not set, cannot convert to/from velocities",
):
sq1.to(u.km / u.s)
with pytest.raises(
ValueError,
match="doppler_convention not set, cannot convert to/from velocities",
):
sq2.to(u.GHz)
with pytest.raises(
ValueError, match="doppler_rest not set, cannot convert to/from velocities"
):
sq1.to(u.km / u.s, doppler_convention="radio")
with pytest.raises(
ValueError, match="doppler_rest not set, cannot convert to/from velocities"
):
sq2.to(u.GHz, doppler_convention="radio")
with pytest.raises(
u.UnitsError,
match="Argument 'doppler_rest' to function 'to' must be in units",
):
sq1.to(u.km / u.s, doppler_convention="radio", doppler_rest=5 * u.kg)
with pytest.raises(
u.UnitsError,
match="Argument 'doppler_rest' to function 'to' must be in units",
):
sq2.to(u.GHz, doppler_convention="radio", doppler_rest=5 * u.kg)
with pytest.raises(
ValueError,
match="doppler_convention should be one of optical/radio/relativistic",
):
sq1.to(u.km / u.s, doppler_convention="banana", doppler_rest=5 * u.GHz)
with pytest.raises(
ValueError,
match="doppler_convention should be one of optical/radio/relativistic",
):
sq2.to(u.GHz, doppler_convention="banana", doppler_rest=5 * u.GHz)
with pytest.raises(ValueError, match="Original doppler_convention not set"):
sq2.to(u.km / u.s, doppler_convention="radio")
with pytest.raises(ValueError, match="Original doppler_rest not set"):
sq2.to(u.km / u.s, doppler_rest=5 * u.GHz)
def test_doppler_set_parameters(self):
sq1 = SpectralQuantity(1 * u.km / u.s)
with pytest.raises(
ValueError,
match="doppler_convention should be one of optical/radio/relativistic",
):
sq1.doppler_convention = "banana"
assert sq1.doppler_convention is None
sq1.doppler_convention = "radio"
assert sq1.doppler_convention == "radio"
with pytest.raises(
AttributeError,
match="doppler_convention has already been set, and cannot be changed",
):
sq1.doppler_convention = "optical"
assert sq1.doppler_convention == "radio"
with pytest.raises(
u.UnitsError,
match="Argument 'value' to function 'doppler_rest' must be in units",
):
sq1.doppler_rest = 5 * u.kg
sq1.doppler_rest = 5 * u.GHz
assert_quantity_allclose(sq1.doppler_rest, 5 * u.GHz)
with pytest.raises(
AttributeError,
match="doppler_rest has already been set, and cannot be changed",
):
sq1.doppler_rest = 4 * u.GHz
assert_quantity_allclose(sq1.doppler_rest, 5 * u.GHz)
def test_arithmetic(self):
# Checks for arithmetic - some operations should return SpectralQuantity,
# while some should just return plain Quantity
# First, operations that should return SpectralQuantity
sq1 = SpectralQuantity(10 * u.AA)
sq2 = sq1 * 2
assert isinstance(sq2, SpectralQuantity)
assert sq2.value == 20
assert sq2.unit == u.AA
sq2 = sq1 / 2
assert isinstance(sq2, SpectralQuantity)
assert sq2.value == 5
assert sq2.unit == u.AA
sq3 = SpectralQuantity(10 * u.AA)
sq3 *= 2
assert isinstance(sq3, SpectralQuantity)
assert sq3.value == 20
assert sq3.unit == u.AA
sq4 = SpectralQuantity(10 * u.AA)
sq4 /= 2
assert isinstance(sq4, SpectralQuantity)
assert sq4.value == 5
assert sq4.unit == u.AA
sq5 = SpectralQuantity(10 * u.AA)
with pytest.raises(
TypeError,
match="Cannot store the result of this operation in SpectralQuantity",
):
sq5 += 10 * u.AA
# Note different order to sq2
sq6 = 2 * sq1
assert isinstance(sq6, SpectralQuantity)
assert sq6.value == 20
assert sq6.unit == u.AA
# Next, operations that should return Quantity
q1 = sq1 / u.s
assert isinstance(q1, u.Quantity) and not isinstance(q1, SpectralQuantity)
assert q1.value == 10
assert q1.unit.is_equivalent(u.AA / u.s)
q2 = sq1 / u.kg
assert isinstance(q2, u.Quantity) and not isinstance(q2, SpectralQuantity)
assert q2.value == 10
assert q2.unit.is_equivalent(u.AA / u.kg)
q3 = sq1 + 10 * u.AA
assert isinstance(q3, u.Quantity) and not isinstance(q3, SpectralQuantity)
assert q3.value == 20
assert q3.unit == u.AA
q4 = sq1 / SpectralQuantity(5 * u.AA)
assert isinstance(q4, u.Quantity) and not isinstance(q4, SpectralQuantity)
assert q4.value == 2
assert q4.unit == u.one
def test_ufuncs(self):
# Checks for ufuncs - some operations should return SpectralQuantity,
# while some should just return plain Quantity
# First, operations that should return SpectralQuantity
sq1 = SpectralQuantity([10, 20, 30] * u.AA)
for ufunc in (np.min, np.max):
sq2 = ufunc(sq1)
assert isinstance(sq2, SpectralQuantity)
assert sq2.value == ufunc(sq1.value)
assert sq2.unit == u.AA
def test_functions(self):
# Checks for other functions - some operations should return SpectralQuantity,
# while some should just return plain Quantity
# First, operations that should return SpectralQuantity
sq1 = SpectralQuantity([10, 20, 30] * u.AA)
for func in (np.nanmin, np.nanmax):
sq2 = func(sq1)
assert isinstance(sq2, SpectralQuantity)
assert sq2.value == func(sq1.value)
assert sq2.unit == u.AA
# Next, operations that should return Quantity
for func in (np.sum,):
q3 = func(sq1)
assert isinstance(q3, u.Quantity) and not isinstance(q3, SpectralQuantity)
assert q3.value == func(sq1.value)
assert q3.unit == u.AA
@pytest.mark.xfail
def test_functions_std(self):
# np.std should return a Quantity but it returns a SpectralQuantity. We
# make this a separate xfailed test for now, but once this passes,
# np.std could also just be added to the main test_functions test.
# See https://github.com/astropy/astropy/issues/10245 for more details.
# Checks for other functions - some operations should return SpectralQuantity,
# while some should just return plain Quantity
# First, operations that should return SpectralQuantity
sq1 = SpectralQuantity([10, 20, 30] * u.AA)
q1 = np.std(sq1)
assert isinstance(q1, u.Quantity) and not isinstance(q1, SpectralQuantity)
assert q1.value == np.std(sq1.value)
assert q1.unit == u.AA
|
0dc1cd44f1cf3737d098e698fc8585cfd1d24d93c835e034abf168bad75e2790 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains tests for the name resolve convenience module.
"""
import time
import urllib.request
import numpy as np
import pytest
from pytest_remotedata.disable_internet import no_internet
from astropy import units as u
from astropy.config import paths
from astropy.coordinates.name_resolve import (
NameResolveError,
_parse_response,
get_icrs_coordinates,
sesame_database,
sesame_url,
)
from astropy.coordinates.sky_coordinate import SkyCoord
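# The blocks below are canned Sesame responses, used as a fallback when the
# live service cannot be reached; the "%J" lines carry the J2000 RA/Dec in
# degrees, which is what _parse_response() extracts.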
_cached_ngc3642 = dict()
_cached_ngc3642[
"simbad"
] = """# NGC 3642 #Q22523669
#=S=Simbad (via url): 1
%@ 503952
%I.0 NGC 3642
%C.0 LIN
%C.N0 15.15.01.00
%J 170.5750583 +59.0742417 = 11:22:18.01 +59:04:27.2
%V z 1593 0.005327 [0.000060] D 2002LEDA.........0P
%D 1.673 1.657 75 (32767) (I) C 2006AJ....131.1163S
%T 5 =32800000 D 2011A&A...532A..74B
%#B 140
#====Done (2013-Feb-12,16:37:11z)===="""
_cached_ngc3642[
"vizier"
] = """# NGC 3642 #Q22523677
#=V=VizieR (local): 1
%J 170.56 +59.08 = 11:22.2 +59:05
%I.0 {NGC} 3642
#====Done (2013-Feb-12,16:37:42z)===="""
_cached_ngc3642[
"all"
] = """# ngc3642 #Q22523722
#=S=Simbad (via url): 1
%@ 503952
%I.0 NGC 3642
%C.0 LIN
%C.N0 15.15.01.00
%J 170.5750583 +59.0742417 = 11:22:18.01 +59:04:27.2
%V z 1593 0.005327 [0.000060] D 2002LEDA.........0P
%D 1.673 1.657 75 (32767) (I) C 2006AJ....131.1163S
%T 5 =32800000 D 2011A&A...532A..74B
%#B 140
#=V=VizieR (local): 1
%J 170.56 +59.08 = 11:22.2 +59:05
%I.0 {NGC} 3642
#!N=NED : *** Could not access the server ***
#====Done (2013-Feb-12,16:39:48z)===="""
_cached_castor = dict()
_cached_castor[
"all"
] = """# castor #Q22524249
#=S=Simbad (via url): 1
%@ 983633
%I.0 NAME CASTOR
%C.0 **
%C.N0 12.13.00.00
%J 113.649471640 +31.888282216 = 07:34:35.87 +31:53:17.8
%J.E [34.72 25.95 0] A 2007A&A...474..653V
%P -191.45 -145.19 [3.95 2.95 0] A 2007A&A...474..653V
%X 64.12 [3.75] A 2007A&A...474..653V
%S A1V+A2Vm =0.0000D200.0030.0110000000100000 C 2001AJ....122.3466M
%#B 179
#!V=VizieR (local): No table found for: castor
#!N=NED: ****object name not recognized by NED name interpreter
#!N=NED: ***Not recognized by NED: castor
#====Done (2013-Feb-12,16:52:02z)===="""
_cached_castor[
"simbad"
] = """# castor #Q22524495
#=S=Simbad (via url): 1
%@ 983633
%I.0 NAME CASTOR
%C.0 **
%C.N0 12.13.00.00
%J 113.649471640 +31.888282216 = 07:34:35.87 +31:53:17.8
%J.E [34.72 25.95 0] A 2007A&A...474..653V
%P -191.45 -145.19 [3.95 2.95 0] A 2007A&A...474..653V
%X 64.12 [3.75] A 2007A&A...474..653V
%S A1V+A2Vm =0.0000D200.0030.0110000000100000 C 2001AJ....122.3466M
%#B 179
#====Done (2013-Feb-12,17:00:39z)===="""
@pytest.mark.remote_data
def test_names():
# First check that sesame is up
if (
urllib.request.urlopen(
"http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame"
).getcode()
!= 200
):
pytest.skip(
"SESAME appears to be down, skipping test_name_resolve.py:test_names()..."
)
with pytest.raises(NameResolveError):
get_icrs_coordinates("m87h34hhh")
try:
icrs = get_icrs_coordinates("NGC 3642")
except NameResolveError:
ra, dec = _parse_response(_cached_ngc3642["all"])
icrs = SkyCoord(ra=float(ra) * u.degree, dec=float(dec) * u.degree)
icrs_true = SkyCoord(ra="11h 22m 18.014s", dec="59d 04m 27.27s")
# use precision of only 1 decimal here and below because the result can
# change due to Sesame server-side changes.
np.testing.assert_almost_equal(icrs.ra.degree, icrs_true.ra.degree, 1)
np.testing.assert_almost_equal(icrs.dec.degree, icrs_true.dec.degree, 1)
try:
icrs = get_icrs_coordinates("castor")
except NameResolveError:
ra, dec = _parse_response(_cached_castor["all"])
icrs = SkyCoord(ra=float(ra) * u.degree, dec=float(dec) * u.degree)
icrs_true = SkyCoord(ra="07h 34m 35.87s", dec="+31d 53m 17.8s")
np.testing.assert_almost_equal(icrs.ra.degree, icrs_true.ra.degree, 1)
np.testing.assert_almost_equal(icrs.dec.degree, icrs_true.dec.degree, 1)
@pytest.mark.remote_data
def test_name_resolve_cache(tmp_path):
from astropy.utils.data import get_cached_urls
target_name = "castor"
(temp_cache_dir := tmp_path / "cache").mkdir()
with paths.set_temp_cache(temp_cache_dir, delete=True):
assert len(get_cached_urls()) == 0
icrs1 = get_icrs_coordinates(target_name, cache=True)
urls = get_cached_urls()
assert len(urls) == 1
expected_urls = sesame_url.get()
assert any(
urls[0].startswith(x) for x in expected_urls
), f"{urls[0]} not in {expected_urls}"
# Try reloading coordinates, now should just reload cached data:
with no_internet():
icrs2 = get_icrs_coordinates(target_name, cache=True)
assert len(get_cached_urls()) == 1
assert u.allclose(icrs1.ra, icrs2.ra)
assert u.allclose(icrs1.dec, icrs2.dec)
def test_names_parse():
# a few test cases for parsing embedded coordinates from object name
test_names = [
"CRTS SSS100805 J194428-420209",
"MASTER OT J061451.7-272535.5",
"2MASS J06495091-0737408",
"1RXS J042555.8-194534",
"SDSS J132411.57+032050.5",
"DENIS-P J203137.5-000511",
"2QZ J142438.9-022739",
"CXOU J141312.3-652013",
]
for name in test_names:
sc = get_icrs_coordinates(name, parse=True)
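# Illustrative example of what parse=True does: "SDSS J132411.57+032050.5"
# encodes RA = 13h24m11.57s, Dec = +03d20m50.5s directly in the name, so no
# network lookup is required.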
@pytest.mark.remote_data
@pytest.mark.parametrize(
("name", "db_dict"), [("NGC 3642", _cached_ngc3642), ("castor", _cached_castor)]
)
def test_database_specify(name, db_dict):
# First check that at least some sesame mirror is up
for url in sesame_url.get():
if urllib.request.urlopen(url).getcode() == 200:
break
else:
pytest.skip(
"All SESAME mirrors appear to be down, skipping "
"test_name_resolve.py:test_database_specify()..."
)
for db in db_dict.keys():
with sesame_database.set(db):
icrs = SkyCoord.from_name(name)
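# Pause briefly between queries to be gentle on the Sesame mirrors.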
time.sleep(1)
|
32c9baf662ae4072f2566757fad7cc7cbc565ef2242543d2b19ab64c9519a252 | import pickle
import numpy as np
import pytest
import astropy.units as u
from astropy import coordinates as coord
from astropy.coordinates import Longitude
from astropy.tests.helper import check_pickling_recovery, pickle_protocol # noqa: F401
# Can't test distances without scipy due to cosmology deps
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_basic():
lon1 = Longitude(1.23, "radian", wrap_angle="180d")
s = pickle.dumps(lon1)
lon2 = pickle.loads(s)
def test_pickle_longitude_wrap_angle():
a = Longitude(1.23, "radian", wrap_angle="180d")
s = pickle.dumps(a)
b = pickle.loads(s)
assert a.rad == b.rad
assert a.wrap_angle == b.wrap_angle
_names = [
coord.Angle,
coord.Distance,
coord.DynamicMatrixTransform,
coord.ICRS,
coord.Latitude,
coord.Longitude,
coord.StaticMatrixTransform,
]
_xfail = [False, not HAS_SCIPY, True, True, False, True, False]
_args = [
[0.0],
[],
[lambda *args: np.identity(3), coord.ICRS, coord.ICRS],
[0, 0],
[0],
[0],
[np.identity(3), coord.ICRS, coord.ICRS],
]
_kwargs = [
{"unit": "radian"},
{"z": 0.23},
{},
{"unit": ["radian", "radian"]},
{"unit": "radian"},
{"unit": "radian"},
{},
]
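# The four module-level lists above are parallel: element i of _args/_kwargs
# instantiates _names[i], and _xfail[i] marks classes whose pickle round-trip
# is expected to fail (e.g. Distance needs scipy, and DynamicMatrixTransform
# carries a lambda, which cannot be pickled).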
@pytest.mark.parametrize(
("name", "args", "kwargs", "xfail"), tuple(zip(_names, _args, _kwargs, _xfail))
)
def test_simple_object(pickle_protocol, name, args, kwargs, xfail): # noqa: F811
# Tests easily instantiated objects
if xfail:
pytest.xfail()
original = name(*args, **kwargs)
check_pickling_recovery(original, pickle_protocol)
class _CustomICRS(coord.ICRS):
default_representation = coord.PhysicsSphericalRepresentation
@pytest.mark.parametrize(
"frame",
[
coord.SkyOffsetFrame(origin=coord.ICRS(0 * u.deg, 0 * u.deg)),
coord.SkyOffsetFrame(
5 * u.deg, 10 * u.deg, origin=coord.Galactic(2 * u.deg, -3 * u.deg)
),
coord.SkyOffsetFrame(
5 * u.deg,
10 * u.deg,
10 * u.pc,
origin=coord.Galactic(2 * u.deg, -3 * u.deg),
representation_type=coord.PhysicsSphericalRepresentation,
),
coord.SkyOffsetFrame(
5 * u.deg,
10 * u.deg,
0 * u.pc,
origin=_CustomICRS(2 * u.deg, 3 * u.deg, 1 * u.pc),
),
],
)
def test_skyoffset_pickle(pickle_protocol, frame): # noqa: F811
"""
This is a regression test for issue #9249:
https://github.com/astropy/astropy/issues/9249
"""
check_pickling_recovery(frame, pickle_protocol)
|
1fe6576416b3412c868572bd5945c36585b053420fa18772b24400fd3a7c61de | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
SkyCoord,
galactocentric_frame_defaults,
)
from astropy.coordinates.builtin_frames import (
CIRS,
FK4,
FK5,
GCRS,
HCRS,
ICRS,
LSR,
FK4NoETerms,
Galactic,
GalacticLSR,
Galactocentric,
Supergalactic,
)
from astropy.coordinates.distances import Distance
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
# used below in the next parametrized test
m31_sys = [ICRS, FK5, FK4, Galactic]
m31_coo = [
(10.6847929, 41.2690650),
(10.6847929, 41.2690650),
(10.0004738, 40.9952444),
(121.1744050, -21.5729360),
]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc
m31_params = []
for i in range(len(m31_sys)):
for j in range(len(m31_sys)):
if i < j:
m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))
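# With 4 systems this yields the 6 unordered pairs (i < j); the reverse
# direction of each pair is covered by the round-trip check in the test.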
@pytest.mark.parametrize(("fromsys", "tosys", "fromcoo", "tocoo"), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED.
"""
coo1 = fromsys(ra=fromcoo[0] * u.deg, dec=fromcoo[1] * u.deg, distance=m31_dist)
coo2 = coo1.transform_to(tosys())
if tosys is FK4:
coo2_prec = coo2.transform_to(FK4(equinox=Time("B1950")))
# convert_precision <1 arcsec
assert np.abs(coo2_prec.spherical.lon - tocoo[0] * u.deg) < convert_precision
assert np.abs(coo2_prec.spherical.lat - tocoo[1] * u.deg) < convert_precision
else:
assert np.abs(coo2.spherical.lon - tocoo[0] * u.deg) < convert_precision  # <1 arcsec
assert np.abs(coo2.spherical.lat - tocoo[1] * u.deg) < convert_precision
assert coo1.distance.unit == u.kpc
assert coo2.distance.unit == u.kpc
assert m31_dist.unit == u.kpc
assert np.abs(coo2.distance - m31_dist) < dist_precision
# check round-tripping
coo1_2 = coo2.transform_to(fromsys())
assert np.abs(coo1_2.spherical.lon - fromcoo[0] * u.deg) < roundtrip_precision
assert np.abs(coo1_2.spherical.lat - fromcoo[1] * u.deg) < roundtrip_precision
assert np.abs(coo1_2.distance - m31_dist) < dist_precision
def test_precession():
"""
Ensures that FK4 and FK5 coordinates precess their equinoxes
"""
j2000 = Time("J2000")
b1950 = Time("B1950")
j1975 = Time("J1975")
b1975 = Time("B1975")
fk4 = FK4(ra=1 * u.radian, dec=0.5 * u.radian)
assert fk4.equinox.byear == b1950.byear
fk4_2 = fk4.transform_to(FK4(equinox=b1975))
assert fk4_2.equinox.byear == b1975.byear
fk5 = FK5(ra=1 * u.radian, dec=0.5 * u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK5(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
def test_fk5_galactic():
"""
Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
"""
fk5 = FK5(ra=1 * u.deg, dec=2 * u.deg)
direct = fk5.transform_to(Galactic())
indirect = fk5.transform_to(FK4()).transform_to(Galactic())
assert direct.separation(indirect).degree < 1.0e-10
direct = fk5.transform_to(Galactic())
indirect = fk5.transform_to(FK4NoETerms()).transform_to(Galactic())
assert direct.separation(indirect).degree < 1.0e-10
def test_galactocentric():
# when z_sun=0, transformation should be very similar to Galactic
icrs_coord = ICRS(
ra=np.linspace(0, 360, 10) * u.deg,
dec=np.linspace(-90, 90, 10) * u.deg,
distance=1.0 * u.kpc,
)
g_xyz = icrs_coord.transform_to(Galactic()).cartesian.xyz
with galactocentric_frame_defaults.set("pre-v4.0"):
gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0 * u.kpc)).cartesian.xyz
diff = np.abs(g_xyz - gc_xyz)
assert allclose(diff[0], 8.3 * u.kpc, atol=1e-5 * u.kpc)
assert allclose(diff[1:], 0 * u.kpc, atol=1e-5 * u.kpc)
# generate some test coordinates
g = Galactic(
l=[0, 0, 45, 315] * u.deg,
b=[-45, 45, 0, 0] * u.deg,
distance=[np.sqrt(2)] * 4 * u.kpc,
)
with galactocentric_frame_defaults.set("pre-v4.0"):
xyz = g.transform_to(
Galactocentric(galcen_distance=1.0 * u.kpc, z_sun=0.0 * u.pc)
).cartesian.xyz
true_xyz = np.array([[0, 0, -1.0], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T * u.kpc
assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1e-5 * u.kpc)
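# Geometry of the first test point above: with the Sun 1 kpc from the
# Galactic centre and z_sun = 0, a source at (l, b) = (0, -45 deg) and
# d = sqrt(2) kpc lies 1 kpc toward the centre (reaching x = 0 from the Sun
# at x = -1 kpc) and 1 kpc below the plane, i.e. at (x, y, z) = (0, 0, -1) kpc.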
# check that ND arrays work
# from Galactocentric to Galactic
x = np.linspace(-10.0, 10.0, 100) * u.kpc
y = np.linspace(-10.0, 10.0, 100) * u.kpc
z = np.zeros_like(x)
# from Galactic to Galactocentric
l = np.linspace(15, 30.0, 100) * u.deg
b = np.linspace(-10.0, 10.0, 100) * u.deg
d = np.ones_like(l.value) * u.kpc
with galactocentric_frame_defaults.set("latest"):
g1 = Galactocentric(x=x, y=y, z=z)
g2 = Galactocentric(
x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1), z=z.reshape(100, 1, 1)
)
g1t = g1.transform_to(Galactic())
g2t = g2.transform_to(Galactic())
assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0])
g1 = Galactic(l=l, b=b, distance=d)
g2 = Galactic(
l=l.reshape(100, 1, 1),
b=b.reshape(100, 1, 1),
distance=d.reshape(100, 1, 1),
)
g1t = g1.transform_to(Galactocentric())
g2t = g2.transform_to(Galactocentric())
np.testing.assert_almost_equal(
g1t.cartesian.xyz.value, g2t.cartesian.xyz.value[:, :, 0, 0]
)
def test_supergalactic():
"""
Check Galactic<->Supergalactic and Galactic<->ICRS conversion.
"""
# Check supergalactic North pole.
npole = Galactic(l=47.37 * u.degree, b=+6.32 * u.degree)
assert allclose(npole.transform_to(Supergalactic()).sgb.deg, +90, atol=1e-9)
# Check the origin of supergalactic longitude.
lon0 = Supergalactic(sgl=0 * u.degree, sgb=0 * u.degree)
lon0_gal = lon0.transform_to(Galactic())
assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9)
assert allclose(lon0_gal.b.deg, 0, atol=1e-9)
# Test Galactic<->ICRS with some positions that appear in Foley et al. 2008
# (https://ui.adsabs.harvard.edu/abs/2008A%26A...484..143F)
# GRB 021219
supergalactic = Supergalactic(sgl=29.91 * u.degree, sgb=+73.72 * u.degree)
icrs = SkyCoord("18h50m27s +31d57m17s")
assert supergalactic.separation(icrs) < 0.005 * u.degree
# GRB 030320
supergalactic = Supergalactic(sgl=-174.44 * u.degree, sgb=+46.17 * u.degree)
icrs = SkyCoord("17h51m36s -25d18m52s")
assert supergalactic.separation(icrs) < 0.005 * u.degree
class TestHCRS:
"""
Check HCRS<->ICRS coordinate conversions.
Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and
`tarr` as defined below, the ICRS Solar positions were predicted using, e.g.
coord.ICRS(coord.get_body_barycentric(tarr, 'sun')).
"""
def setup_method(self):
self.t1 = Time("2013-02-02T23:00")
self.t2 = Time("2013-08-02T23:00")
self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"])
self.sun_icrs_scalar = ICRS(
ra=244.52984668 * u.deg,
dec=-22.36943723 * u.deg,
distance=406615.66347377 * u.km,
)
# array of positions corresponds to times in `tarr`
self.sun_icrs_arr = ICRS(
ra=[244.52989062, 271.40976248] * u.deg,
dec=[-22.36943605, -25.07431079] * u.deg,
distance=[406615.66347377, 375484.13558956] * u.km,
)
# corresponding HCRS positions
self.sun_hcrs_t1 = HCRS(
CartesianRepresentation([0.0, 0.0, 0.0] * u.km), obstime=self.t1
)
twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km)
self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr)
self.tolerance = 5 * u.km
def test_from_hcrs(self):
# test scalar transform
transformed = self.sun_hcrs_t1.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_scalar)
assert_allclose(separation, 0 * u.km, atol=self.tolerance)
# test non-scalar positions and times
transformed = self.sun_hcrs_tarr.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_arr)
assert_allclose(separation, 0 * u.km, atol=self.tolerance)
def test_from_icrs(self):
# scalar positions
transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1))
separation = transformed.separation_3d(self.sun_hcrs_t1)
assert_allclose(separation, 0 * u.km, atol=self.tolerance)
# nonscalar positions
transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr))
separation = transformed.separation_3d(self.sun_hcrs_tarr)
assert_allclose(separation, 0 * u.km, atol=self.tolerance)
class TestHelioBaryCentric:
"""
Check GCRS<->Heliocentric and Barycentric coordinate conversions.
Uses the WHT observing site (information grabbed from data/sites.json).
"""
def setup_method(self):
wht = EarthLocation(342.12 * u.deg, 28.758333333333333 * u.deg, 2327 * u.m)
self.obstime = Time("2013-02-02T23:00")
self.wht_itrs = wht.get_itrs(obstime=self.obstime)
def test_heliocentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
helio = gcrs.transform_to(HCRS(obstime=self.obstime))
# Check that the result has not drifted from previously computed values.
previous = [-1.02597256e11, 9.71725820e10, 4.21268419e10] * u.m
assert_allclose(helio.cartesian.xyz, previous)
# And that it agrees with SLALIB to within 14 km
helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au
assert np.sqrt(((helio.cartesian.xyz - helio_slalib) ** 2).sum()) < 14.0 * u.km
def test_barycentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
bary = gcrs.transform_to(ICRS())
previous = [-1.02758958e11, 9.68331109e10, 4.19720938e10] * u.m
assert_allclose(bary.cartesian.xyz, previous)
# And that it agrees with the SLALIB answer to within 14 km
bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au
assert np.sqrt(((bary.cartesian.xyz - bary_slalib) ** 2).sum()) < 14.0 * u.km
def test_lsr_sanity():
# random numbers, but zero velocity in ICRS frame
icrs = ICRS(
ra=15.1241 * u.deg,
dec=17.5143 * u.deg,
distance=150.12 * u.pc,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
lsr = icrs.transform_to(LSR())
lsr_diff = lsr.data.differentials["s"]
cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data)
lsr_vel = ICRS(cart_lsr_vel)
gal_lsr = lsr_vel.transform_to(Galactic()).cartesian.xyz
assert allclose(gal_lsr.to(u.km / u.s, u.dimensionless_angles()), lsr.v_bary.d_xyz)
# moving with LSR velocity
lsr = LSR(
ra=15.1241 * u.deg,
dec=17.5143 * u.deg,
distance=150.12 * u.pc,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
icrs = lsr.transform_to(ICRS())
icrs_diff = icrs.data.differentials["s"]
cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data)
vel = ICRS(cart_vel)
gal_icrs = vel.transform_to(Galactic()).cartesian.xyz
assert allclose(
gal_icrs.to(u.km / u.s, u.dimensionless_angles()), -lsr.v_bary.d_xyz
)
def test_hcrs_icrs_differentials():
# Regression test to ensure that velocities survive the HCRS<->ICRS transforms.
# Numbers taken from the original issue, gh-6835 (which transformed to LSR).
hcrs = HCRS(
ra=8.67 * u.deg,
dec=53.09 * u.deg,
distance=117 * u.pc,
pm_ra_cosdec=4.8 * u.mas / u.yr,
pm_dec=-15.16 * u.mas / u.yr,
radial_velocity=23.42 * u.km / u.s,
)
icrs = hcrs.transform_to(ICRS())
# The position and velocity should not change much
assert allclose(hcrs.cartesian.xyz, icrs.cartesian.xyz, rtol=1e-8)
assert allclose(hcrs.velocity.d_xyz, icrs.velocity.d_xyz, rtol=1e-2)
hcrs2 = icrs.transform_to(HCRS())
# The values should round trip
assert allclose(hcrs.cartesian.xyz, hcrs2.cartesian.xyz, rtol=1e-12)
assert allclose(hcrs.velocity.d_xyz, hcrs2.velocity.d_xyz, rtol=1e-12)
def test_cirs_icrs():
"""
Test CIRS<->ICRS transformations, including the CIRS self-transform
"""
t = Time("J2010")
MOONDIST = 385000 * u.km  # approximate semi-major axis of the Moon's orbit
MOONDIST_CART = CartesianRepresentation(
3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg)
cirs_geo_frame = CIRS(obstime=t)
cirs_topo_frame = CIRS(obstime=t, location=loc)
moon_geo = cirs_geo_frame.realize_frame(MOONDIST_CART)
moon_topo = moon_geo.transform_to(cirs_topo_frame)
# now check that the distance change is similar to earth radius
assert (
1000 * u.km
< np.abs(moon_topo.distance - moon_geo.distance).to(u.km)
< 7000 * u.km
)
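# The 1000-7000 km window brackets one Earth radius (~6378 km): moving the
# observer from the geocenter to the surface changes the apparent lunar
# distance by at most about R_earth, depending on the Moon's direction.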
# now check that it round-trips
moon2 = moon_topo.transform_to(moon_geo)
assert_allclose(moon_geo.cartesian.xyz, moon2.cartesian.xyz)
# now check ICRS transform gives a decent distance from Barycentre
moon_icrs = moon_geo.transform_to(ICRS())
assert_allclose(moon_icrs.distance - 1 * u.au, 0.0 * u.R_sun, atol=3 * u.R_sun)
@pytest.mark.parametrize("frame", [LSR, GalacticLSR])
def test_lsr_loopback(frame):
xyz = CartesianRepresentation(1, 2, 3) * u.AU
xyz = xyz.with_differentials(CartesianDifferential(4, 5, 6) * u.km / u.s)
v_bary = CartesianDifferential(5, 10, 15) * u.km / u.s
# Test that the loopback properly handles a change in v_bary
from_coo = frame(xyz) # default v_bary
to_frame = frame(v_bary=v_bary)
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the velocity but not the position
assert allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
assert not allclose(
explicit_coo.velocity.d_xyz, from_coo.velocity.d_xyz, rtol=1e-10
)
# Confirm that the loopback matches the explicit transformation
assert allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
assert allclose(
explicit_coo.velocity.d_xyz, implicit_coo.velocity.d_xyz, rtol=1e-10
)
@pytest.mark.parametrize(
"to_frame",
[
Galactocentric(galcen_coord=ICRS(300 * u.deg, -30 * u.deg)),
Galactocentric(galcen_distance=10 * u.kpc),
Galactocentric(z_sun=10 * u.pc),
Galactocentric(roll=1 * u.deg),
],
)
def test_galactocentric_loopback(to_frame):
xyz = CartesianRepresentation(1, 2, 3) * u.pc
from_coo = Galactocentric(xyz)
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the position
assert not allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
|
c2b9dc7d5e35b88e9f3dedf90dcfec0d721533a6403146a529cab2876b8799eb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization and other aspects of Angle and subclasses"""
import pickle
import threading
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.coordinates.angles import Angle, Latitude, Longitude
from astropy.coordinates.errors import (
IllegalHourError,
IllegalMinuteError,
IllegalMinuteWarning,
IllegalSecondError,
IllegalSecondWarning,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_create_angles():
"""
Tests creating and accessing Angle objects
"""
""" The "angle" is a fundamental object. The internal
representation is stored in radians, but this is transparent to the user.
Units *must* be specified rather than a default value be assumed. This is
as much for self-documenting code as anything else.
Angle objects simply represent a single angular coordinate. More specific
angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle."""
a1 = Angle(54.12412, unit=u.degree)
a2 = Angle("54.12412", unit=u.degree)
a3 = Angle("54:07:26.832", unit=u.degree)
a4 = Angle("54.12412 deg")
a5 = Angle("54.12412 degrees")
a6 = Angle("54.12412°") # because we like Unicode
a8 = Angle("54°07'26.832\"")
a9 = Angle([54, 7, 26.832], unit=u.degree)
assert_allclose(a9.value, [54, 7, 26.832])
assert a9.unit is u.degree
a10 = Angle(3.60827466667, unit=u.hour)
a11 = Angle("3:36:29.7888000120", unit=u.hour)
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hour"):
a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hour"):
# Regression test for #5001
a13 = Angle((3, 36, 29.7888000120), unit="hour")
Angle(0.944644098745, unit=u.radian)
with pytest.raises(u.UnitsError):
Angle(54.12412)
# raises an exception because this is ambiguous
with pytest.raises(u.UnitsError):
Angle(54.12412, unit=u.m)
with pytest.raises(ValueError):
Angle(12.34, unit="not a unit")
a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous
a15 = Angle("5h4m3s") # single digits, no decimal
assert a15.unit == u.hourangle
a16 = Angle("1 d")
a17 = Angle("1 degree")
assert a16.degree == 1
assert a17.degree == 1
a18 = Angle("54 07.4472", unit=u.degree)
a19 = Angle("54:07.4472", unit=u.degree)
a20 = Angle("54d07.4472m", unit=u.degree)
a21 = Angle("3h36m", unit=u.hour)
a22 = Angle("3.6h", unit=u.hour)
a23 = Angle("- 3h", unit=u.hour)
a24 = Angle("+ 3h", unit=u.hour)
a25 = Angle(3.0, unit=u.hour**1)
# ensure the above angles that should match do
assert a1 == a2 == a3 == a4 == a5 == a6 == a8 == a18 == a19 == a20
assert_allclose(a1.radian, a2.radian)
assert_allclose(a2.degree, a3.degree)
assert_allclose(a3.radian, a4.radian)
assert_allclose(a4.radian, a5.radian)
assert_allclose(a5.radian, a6.radian)
assert_allclose(a10.degree, a11.degree)
assert a11 == a12 == a13 == a14
assert a21 == a22
assert a23 == -a24
assert a24 == a25
# check for illegal ranges / values
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.degree)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.degree)
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.hour)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.hour)
with pytest.raises(IllegalHourError):
a = Angle("99 25 51.0", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12 25 51.0xxx", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12h34321m32.2s")
assert a1 is not None
def test_angle_from_view():
q = np.arange(3.0) * u.deg
a = q.view(Angle)
assert type(a) is Angle
assert a.unit is q.unit
assert np.all(a == q)
q2 = np.arange(4) * u.m
with pytest.raises(u.UnitTypeError):
q2.view(Angle)
def test_angle_ops():
"""
Tests operations on Angle objects
"""
# Angles can be added and subtracted. Multiplication and division by a
# scalar is also permitted. A negative operator is also valid. All of
# these operate in a single dimension. Attempting to multiply or divide two
# Angle objects will return a quantity. An exception will be raised if it
# is attempted to store output with a non-angular unit in an Angle [#2718].
a1 = Angle(3.60827466667, unit=u.hour)
a2 = Angle("54:07:26.832", unit=u.degree)
a1 + a2 # creates new Angle object
a1 - a2
-a1
assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10
# commutativity
assert (2 * a1).hour == (a1 * 2).hour
a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1
assert_allclose(a1.radian, a3.radian)
assert a1 is not a3
a4 = abs(-a1)
assert a4.radian == a1.radian
a5 = Angle(5.0, unit=u.hour)
assert a5 > a1
assert a5 >= a1
assert a1 < a5
assert a1 <= a5
# check operations with non-angular result give Quantity.
a6 = Angle(45.0, u.degree)
a7 = a6 * a5
assert type(a7) is u.Quantity
# but those with angular result yield Angle.
# (a9 is regression test for #5327)
a8 = a1 + 1.0 * u.deg
assert type(a8) is Angle
a9 = 1.0 * u.deg + a1
assert type(a9) is Angle
with pytest.raises(TypeError):
a6 *= a5
with pytest.raises(TypeError):
a6 *= u.m
with pytest.raises(TypeError):
np.sin(a6, out=a6)
def test_angle_methods():
# Most methods tested as part of the Quantity tests.
# A few tests here which caused problems before: #8368
a = Angle([0.0, 2.0], "deg")
a_mean = a.mean()
assert type(a_mean) is Angle
assert a_mean == 1.0 * u.degree
a_std = a.std()
assert type(a_std) is Angle
assert a_std == 1.0 * u.degree
a_var = a.var()
assert type(a_var) is u.Quantity
assert a_var == 1.0 * u.degree**2
a_ptp = a.ptp()
assert type(a_ptp) is Angle
assert a_ptp == 2.0 * u.degree
a_max = a.max()
assert type(a_max) is Angle
assert a_max == 2.0 * u.degree
a_min = a.min()
assert type(a_min) is Angle
assert a_min == 0.0 * u.degree
def test_angle_convert():
"""
Test unit conversion of Angle objects
"""
angle = Angle("54.12412", unit=u.degree)
assert_allclose(angle.hour, 3.60827466667)
assert_allclose(angle.radian, 0.944644098745)
assert_allclose(angle.degree, 54.12412)
assert len(angle.hms) == 3
assert isinstance(angle.hms, tuple)
assert angle.hms[0] == 3
assert angle.hms[1] == 36
assert_allclose(angle.hms[2], 29.78879999999947)
# also check that the namedtuple attribute-style access works:
assert angle.hms.h == 3
assert angle.hms.m == 36
assert_allclose(angle.hms.s, 29.78879999999947)
assert len(angle.dms) == 3
assert isinstance(angle.dms, tuple)
assert angle.dms[0] == 54
assert angle.dms[1] == 7
assert_allclose(angle.dms[2], 26.831999999992036)
# also check that the namedtuple attribute-style access works:
assert angle.dms.d == 54
assert angle.dms.m == 7
assert_allclose(angle.dms.s, 26.831999999992036)
assert isinstance(angle.dms[0], float)
assert isinstance(angle.hms[0], float)
# now make sure dms and signed_dms work right for negative angles
negangle = Angle("-54.12412", unit=u.degree)
assert negangle.dms.d == -54
assert negangle.dms.m == -7
assert_allclose(negangle.dms.s, -26.831999999992036)
assert negangle.signed_dms.sign == -1
assert negangle.signed_dms.d == 54
assert negangle.signed_dms.m == 7
assert_allclose(negangle.signed_dms.s, 26.831999999992036)
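# i.e. dms carries the sign on every field, while signed_dms factors it out
# into a separate .sign attribute and keeps d, m and s positive.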
def test_angle_formatting():
"""
Tests string formatting for Angle objects
"""
"""
The to_string method of Angle has this signature (abridged):
def to_string(self, unit=None, decimal=False, sep="fromunit", precision=None,
pad=False):
The "decimal" parameter defaults to False since, if you need to print the
Angle as a decimal, there is no need to go through to_string at all (see
above).
"""
angle = Angle("54.12412", unit=u.degree)
# __str__ is the default `format`
assert str(angle) == angle.to_string()
res = "Angle as HMS: 3h36m29.7888s"
assert f"Angle as HMS: {angle.to_string(unit=u.hour)}" == res
res = "Angle as HMS: 3:36:29.7888"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':')}" == res
res = "Angle as HMS: 3:36:29.79"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':', precision=2)}" == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = "Angle as HMS: 3h36m29.7888s"
assert (
"Angle as HMS:"
f" {angle.to_string(unit=u.hour, sep=('h', 'm', 's'), precision=4)}" == res
)
res = "Angle as HMS: 3-36|29.7888"
assert (
f"Angle as HMS: {angle.to_string(unit=u.hour, sep=['-', '|'], precision=4)}"
== res
)
res = "Angle as HMS: 3-36-29.7888"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep='-', precision=4)}" == res
res = "Angle as HMS: 03h36m29.7888s"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, precision=4, pad=True)}" == res
# Same as above, in degrees
angle = Angle("3 36 29.78880", unit=u.degree)
res = "Angle as DMS: 3d36m29.7888s"
assert f"Angle as DMS: {angle.to_string(unit=u.degree)}" == res
res = "Angle as DMS: 3:36:29.7888"
assert f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':')}" == res
res = "Angle as DMS: 3:36:29.79"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':', precision=2)}" == res
)
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = "Angle as DMS: 3d36m29.7888s"
assert (
f"Angle as DMS: {angle.to_string(unit=u.deg, sep=('d', 'm', 's'), precision=4)}"
== res
)
res = "Angle as DMS: 3-36|29.7888"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep=['-', '|'], precision=4)}"
== res
)
res = "Angle as DMS: 3-36-29.7888"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep='-', precision=4)}" == res
)
res = "Angle as DMS: 03d36m29.7888s"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, precision=4, pad=True)}" == res
)
res = "Angle as rad: 0.0629763rad"
assert f"Angle as rad: {angle.to_string(unit=u.radian)}" == res
res = "Angle as rad decimal: 0.0629763"
assert (
f"Angle as rad decimal: {angle.to_string(unit=u.radian, decimal=True)}" == res
)
# check negative angles
angle = Angle(-1.23456789, unit=u.degree)
angle2 = Angle(-1.23456789, unit=u.hour)
assert angle.to_string() == "-1d14m04.444404s"
assert angle.to_string(pad=True) == "-01d14m04.444404s"
assert angle.to_string(unit=u.hour) == "-0h04m56.2962936s"
assert angle2.to_string(unit=u.hour, pad=True) == "-01h14m04.444404s"
assert angle.to_string(unit=u.radian, decimal=True) == "-0.0215473"
# We should recognize units that are equal but not identical
assert angle.to_string(unit=u.hour**1) == "-0h04m56.2962936s"
def test_to_string_vector():
# Regression test for the fact that vectorize doesn't work with Numpy 1.6
assert (
Angle([1.0 / 7.0, 1.0 / 7.0], unit="deg").to_string()[0] == "0d08m34.28571429s"
)
assert Angle([1.0 / 7.0], unit="deg").to_string()[0] == "0d08m34.28571429s"
assert Angle(1.0 / 7.0, unit="deg").to_string() == "0d08m34.28571429s"
def test_angle_format_roundtripping():
"""
Ensures that the string representation of an angle can be used to create a
new valid Angle.
"""
a1 = Angle(0, unit=u.radian)
a2 = Angle(10, unit=u.degree)
a3 = Angle(0.543, unit=u.degree)
a4 = Angle("1d2m3.4s")
assert Angle(str(a1)).degree == a1.degree
assert Angle(str(a2)).degree == a2.degree
assert Angle(str(a3)).degree == a3.degree
assert Angle(str(a4)).degree == a4.degree
# also check Longitude/Latitude
ra = Longitude("1h2m3.4s")
dec = Latitude("1d2m3.4s")
assert_allclose(Angle(str(ra)).degree, ra.degree)
assert_allclose(Angle(str(dec)).degree, dec.degree)
def test_radec():
"""
Tests creation/operations of Longitude and Latitude objects
"""
"""
Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude
and Latitude can parse any unambiguous format (tuples, formatted strings, etc.).
The intention is not to create an Angle subclass for every possible
coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude
are so prevalent in astronomy that it's worth creating ones for these
units. They will be noted as "special" in the docs; the plain Angle class
should be used for other coordinate systems.
"""
with pytest.raises(u.UnitsError):
ra = Longitude("4:08:15.162342") # error - hours or degrees?
with pytest.raises(u.UnitsError):
ra = Longitude("-4:08:15.162342")
# the "smart" initializer allows >24 to automatically do degrees, but the
# Angle-based one does not
# TODO: adjust in 0.3 for whatever behavior is decided on
# ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24
# assert_allclose(ra.degree, 26.570929342)
with pytest.raises(u.UnitsError):
ra = Longitude("26:34:15.345634")
# ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(12)
with pytest.raises(ValueError):
ra = Longitude("garbage containing a d and no units")
ra = Longitude("12h43m23s")
assert_allclose(ra.hour, 12.7230555556)
# TODO: again, fix based on >24 behavior
# ra = Longitude((56,14,52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((56, 14, 52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((12, 14, 52)) # ambiguous w/o units
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hours"):
ra = Longitude((12, 14, 52), unit=u.hour)
# Units can be specified
ra = Longitude("4:08:15.162342", unit=u.hour)
# TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately
# Where Longitude values are commonly found in hours or degrees, declination is
# nearly always specified in degrees, so this is the default.
# dec = Latitude("-41:08:15.162342")
with pytest.raises(u.UnitsError):
dec = Latitude("-41:08:15.162342")
dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above
def test_negative_zero_dms():
# Test for DMS parser
a = Angle("-00:00:10", u.deg)
assert_allclose(a.degree, -10.0 / 3600.0)
# Unicode minus
a = Angle("−00:00:10", u.deg)
assert_allclose(a.degree, -10.0 / 3600.0)
def test_negative_zero_dm():
# Test for DM parser
a = Angle("-00:10", u.deg)
assert_allclose(a.degree, -10.0 / 60.0)
def test_negative_zero_hms():
# Test for HMS parser
a = Angle("-00:00:10", u.hour)
assert_allclose(a.hour, -10.0 / 3600.0)
def test_negative_zero_hm():
# Test for HM parser
a = Angle("-00:10", u.hour)
assert_allclose(a.hour, -10.0 / 60.0)
def test_negative_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle("-00:60", u.hour)
assert_allclose(a.hour, -1.0)
def test_plus_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle("00:60", u.hour)
assert_allclose(a.hour, 1.0)
def test_negative_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("-00:59:60", u.deg)
assert_allclose(a.degree, -1.0)
def test_plus_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("+00:59:60", u.deg)
assert_allclose(a.degree, 1.0)
def test_negative_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("-00:00:60", u.deg)
assert_allclose(a.degree, -1.0 / 60.0)
def test_plus_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("+00:00:60", u.deg)
assert_allclose(a.degree, 1.0 / 60.0)
def test_angle_to_is_angle():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
assert isinstance(a, Angle)
assert isinstance(a.to(u.rad), Angle)
def test_angle_to_quantity():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
q = u.Quantity(a)
assert isinstance(q, u.Quantity)
assert q.unit is u.deg
def test_quantity_to_angle():
a = Angle(1.0 * u.deg)
assert isinstance(a, Angle)
with pytest.raises(u.UnitsError):
Angle(1.0 * u.meter)
a = Angle(1.0 * u.hour)
assert isinstance(a, Angle)
assert a.unit is u.hourangle
with pytest.raises(u.UnitsError):
Angle(1.0 * u.min)
def test_angle_string():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
assert str(a) == "0d01m00s"
a = Angle("00:00:59S", u.deg)
assert str(a) == "-0d00m59s"
a = Angle("00:00:59N", u.deg)
assert str(a) == "0d00m59s"
a = Angle("00:00:59E", u.deg)
assert str(a) == "0d00m59s"
a = Angle("00:00:59W", u.deg)
assert str(a) == "-0d00m59s"
a = Angle("-00:00:10", u.hour)
assert str(a) == "-0h00m10s"
a = Angle("00:00:59E", u.hour)
assert str(a) == "0h00m59s"
a = Angle("00:00:59W", u.hour)
assert str(a) == "-0h00m59s"
a = Angle(3.2, u.radian)
assert str(a) == "3.2rad"
a = Angle(4.2, u.microarcsecond)
assert str(a) == "4.2uarcsec"
a = Angle("1.0uarcsec")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecN")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecS")
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecE")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecW")
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("3d")
assert_allclose(a.value, 3.0)
assert a.unit == u.degree
a = Angle("3dN")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dS")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle("3dE")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dW")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle('10"')
assert_allclose(a.value, 10.0)
assert a.unit == u.arcsecond
a = Angle("10'N")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'S")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("10'E")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'W")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("45°55′12″N")
assert str(a) == "45d55m12s"
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle("45°55′12″S")
assert str(a) == "-45d55m12s"
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
a = Angle("45°55′12″E")
assert str(a) == "45d55m12s"
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle("45°55′12″W")
assert str(a) == "-45d55m12s"
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
with pytest.raises(ValueError):
Angle("00h00m10sN")
with pytest.raises(ValueError):
Angle("45°55′12″NS")
def test_angle_repr():
assert "Angle" in repr(Angle(0, u.deg))
assert "Longitude" in repr(Longitude(0, u.deg))
assert "Latitude" in repr(Latitude(0, u.deg))
a = Angle(0, u.deg)
repr(a)
def test_large_angle_representation():
"""Test that angles above 360 degrees can be output as strings,
in repr, str, and to_string. (regression test for #1413)"""
a = Angle(350, u.deg) + Angle(350, u.deg)
a.to_string()
a.to_string(u.hourangle)
repr(a)
repr(a.to(u.hourangle))
str(a)
str(a.to(u.hourangle))
def test_wrap_at_inplace():
a = Angle([-20, 150, 350, 360] * u.deg)
out = a.wrap_at("180d", inplace=True)
assert out is None
assert np.all(a.degree == np.array([-20.0, 150.0, -10.0, 0.0]))
def test_latitude():
with pytest.raises(ValueError):
lat = Latitude(["91d", "89d"])
with pytest.raises(ValueError):
lat = Latitude("-91d")
lat = Latitude(["90d", "89d"])
# check that one can get items
assert lat[0] == 90 * u.deg
assert lat[1] == 89 * u.deg
# and that comparison with angles works
assert np.all(lat == Angle(["90d", "89d"]))
# check setitem works
lat[1] = 45.0 * u.deg
assert np.all(lat == Angle(["90d", "45d"]))
# but not with values out of range
with pytest.raises(ValueError):
lat[0] = 90.001 * u.deg
with pytest.raises(ValueError):
lat[0] = -90.001 * u.deg
# these should also not destroy input (#1851)
assert np.all(lat == Angle(["90d", "45d"]))
# conserve type on unit change (closes #1423)
angle = lat.to("radian")
assert type(angle) is Latitude
# but not on calculations
angle = lat - 190 * u.deg
assert type(angle) is Angle
assert angle[0] == -100 * u.deg
lat = Latitude("80d")
angle = lat / 2.0
assert type(angle) is Angle
assert angle == 40 * u.deg
angle = lat * 2.0
assert type(angle) is Angle
assert angle == 160 * u.deg
angle = -lat
assert type(angle) is Angle
assert angle == -80 * u.deg
# Test errors when trying to interoperate with longitudes.
with pytest.raises(
TypeError, match="A Latitude angle cannot be created from a Longitude angle"
):
lon = Longitude(10, "deg")
lat = Latitude(lon)
with pytest.raises(
TypeError, match="A Longitude angle cannot be assigned to a Latitude angle"
):
lon = Longitude(10, "deg")
lat = Latitude([20], "deg")
lat[0] = lon
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lon = Longitude(10, "deg")
lat = Latitude(Angle(lon))
assert lat.value == 10.0
# Check setitem.
lon = Longitude(10, "deg")
lat = Latitude([20], "deg")
lat[0] = Angle(lon)
assert lat.value[0] == 10.0
def test_longitude():
# Default wrapping at 360d with an array input
lon = Longitude(["370d", "88d"])
assert np.all(lon == Longitude(["10d", "88d"]))
assert np.all(lon == Angle(["10d", "88d"]))
# conserve type on unit change and keep wrap_angle (closes #1423)
angle = lon.to("hourangle")
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[0]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[1:]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
# but not on calculations
angle = lon / 2.0
assert np.all(angle == Angle(["5d", "44d"]))
assert type(angle) is Angle
assert not hasattr(angle, "wrap_angle")
angle = lon * 2.0 + 400 * u.deg
assert np.all(angle == Angle(["420d", "576d"]))
assert type(angle) is Angle
# Test setting a mutable value and having it wrap
lon[1] = -10 * u.deg
assert np.all(lon == Angle(["10d", "350d"]))
# Test wrapping and try hitting some edge cases
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
assert np.all(lon.degree == np.array([0.0, 90, 180, 270, 0]))
lon = Longitude(
np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle="180d"
)
assert np.all(lon.degree == np.array([0.0, 90, -180, -90, 0]))
# Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle)
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
lon.wrap_angle = "180d"
assert np.all(lon.degree == np.array([0.0, 90, -180, -90, 0]))
lon = Longitude("460d")
assert lon == Angle("100d")
lon.wrap_angle = "90d"
assert lon == Angle("-260d")
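# Arithmetic behind the two asserts above: 460d first wraps into [0d, 360d)
# as 100d; re-wrapping at 90d targets [-270d, 90d), so 100d - 360d = -260d.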
# check that if we initialize a longitude with another longitude,
# wrap_angle is kept by default
lon2 = Longitude(lon)
assert lon2.wrap_angle == lon.wrap_angle
# but not if we explicitly set it
lon3 = Longitude(lon, wrap_angle="180d")
assert lon3.wrap_angle == 180 * u.deg
# check that wrap_angle is always an Angle
lon = Longitude(lon, wrap_angle=Longitude(180 * u.deg))
assert lon.wrap_angle == 180 * u.deg
assert lon.wrap_angle.__class__ is Angle
# check that wrap_angle is not copied
wrap_angle = 180 * u.deg
lon = Longitude(lon, wrap_angle=wrap_angle)
assert lon.wrap_angle == 180 * u.deg
assert np.may_share_memory(lon.wrap_angle, wrap_angle)
# check for problem reported in #2037 about Longitude initializing to -0
lon = Longitude(0, u.deg)
lonstr = lon.to_string()
assert not lonstr.startswith("-")
# also make sure dtype is correctly conserved
assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float)
assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int)
# Test errors when trying to interoperate with latitudes.
with pytest.raises(
TypeError, match="A Longitude angle cannot be created from a Latitude angle"
):
lat = Latitude(10, "deg")
lon = Longitude(lat)
with pytest.raises(
TypeError, match="A Latitude angle cannot be assigned to a Longitude angle"
):
lat = Latitude(10, "deg")
lon = Longitude([20], "deg")
lon[0] = lat
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lat = Latitude(10, "deg")
lon = Longitude(Angle(lat))
assert lon.value == 10.0
# Check setitem.
lat = Latitude(10, "deg")
lon = Longitude([20], "deg")
lon[0] = Angle(lat)
assert lon.value[0] == 10.0
def test_wrap_at():
a = Angle([-20, 150, 350, 360] * u.deg)
assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340.0, 150.0, 350.0, 0.0]))
assert np.all(
a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340.0, 150.0, 350.0, 0.0])
)
assert np.all(a.wrap_at("360d").degree == np.array([340.0, 150.0, 350.0, 0.0]))
assert np.all(a.wrap_at("180d").degree == np.array([-20.0, 150.0, -10.0, 0.0]))
assert np.all(
a.wrap_at(np.pi * u.rad).degree == np.array([-20.0, 150.0, -10.0, 0.0])
)
# Test wrapping a scalar Angle
a = Angle("190d")
assert a.wrap_at("180d") == Angle("-170d")
a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg)
for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125):
aw = a.wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
aw = a.to(u.rad).wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
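# The loop above pins down the wrap_at contract: every result lies in the
# half-open interval [wrap_angle - 360d, wrap_angle), whatever the input unit.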
def test_is_within_bounds():
a = Angle([-20, 150, 350] * u.deg)
assert a.is_within_bounds("0d", "360d") is False
assert a.is_within_bounds(None, "360d") is True
assert a.is_within_bounds(-30 * u.deg, None) is True
a = Angle("-20d")
assert a.is_within_bounds("0d", "360d") is False
assert a.is_within_bounds(None, "360d") is True
assert a.is_within_bounds(-30 * u.deg, None) is True
def test_angle_mismatched_unit():
a = Angle("+6h7m8s", unit=u.degree)
assert_allclose(a.value, 91.78333333333332)
def test_regression_formatting_negative():
# Regression test for a bug that caused:
#
# >>> Angle(-1., unit='deg').to_string()
# '-1d00m-0s'
assert Angle(-0.0, unit="deg").to_string() == "-0d00m00s"
assert Angle(-1.0, unit="deg").to_string() == "-1d00m00s"
assert Angle(-0.0, unit="hour").to_string() == "-0h00m00s"
assert Angle(-1.0, unit="hour").to_string() == "-1h00m00s"
def test_regression_formatting_default_precision():
# Regression test for issue #11140
assert Angle("10:20:30.12345678d").to_string() == "10d20m30.12345678s"
assert Angle("10d20m30.123456784564s").to_string() == "10d20m30.12345678s"
assert Angle("10d20m30.123s").to_string() == "10d20m30.123s"
def test_empty_sep():
a = Angle("05h04m31.93830s")
assert a.to_string(sep="", precision=2, pad=True) == "050431.94"
def test_create_tuple():
"""
Tests creation of an angle with an (h,m,s) tuple
(d, m, s) tuples are not tested because of sign ambiguity issues (#13162)
"""
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hours"):
a1 = Angle((1, 30, 0), unit=u.hourangle)
assert a1.value == 1.5
def test_list_of_quantities():
a1 = Angle([1 * u.deg, 1 * u.hourangle])
assert a1.unit == u.deg
assert_allclose(a1.value, [1, 15])
a2 = Angle([1 * u.hourangle, 1 * u.deg], u.deg)
assert a2.unit == u.deg
assert_allclose(a2.value, [15, 1])
def test_multiply_divide():
# Issue #2273
a1 = Angle([1, 2, 3], u.deg)
a2 = Angle([4, 5, 6], u.deg)
a3 = a1 * a2
assert_allclose(a3.value, [4, 10, 18])
assert a3.unit == (u.deg * u.deg)
a3 = a1 / a2
assert_allclose(a3.value, [0.25, 0.4, 0.5])
assert a3.unit == u.dimensionless_unscaled
def test_mixed_string_and_quantity():
a1 = Angle(["1d", 1.0 * u.deg])
assert_array_equal(a1.value, [1.0, 1.0])
assert a1.unit == u.deg
a2 = Angle(["1d", 1 * u.rad * np.pi, "3d"])
assert_array_equal(a2.value, [1.0, 180.0, 3.0])
assert a2.unit == u.deg
def test_array_angle_tostring():
aobj = Angle([1, 2], u.deg)
assert aobj.to_string().dtype.kind == "U"
assert np.all(aobj.to_string() == ["1d00m00s", "2d00m00s"])
def test_wrap_at_without_new():
"""
Regression test for subtle bugs from situations where an Angle is
created via numpy channels that don't do the standard __new__ but instead
depend on __array_finalize__ to set state. Longitude is used because the
bug was in its _wrap_angle not getting initialized correctly.
"""
l1 = Longitude([1] * u.deg)
l2 = Longitude([2] * u.deg)
l = np.concatenate([l1, l2])
assert l._wrap_angle is not None
def test__str__():
"""
Check the __str__ method used in printing the Angle
"""
# scalar angle
scangle = Angle("10.2345d")
strscangle = scangle.__str__()
assert strscangle == "10d14m04.2s"
# non-scalar array angles
arrangle = Angle(["10.2345d", "-20d"])
strarrangle = arrangle.__str__()
assert strarrangle == "[10d14m04.2s -20d00m00s]"
# summarizing for large arrays, ... should appear
bigarrangle = Angle(np.ones(10000), u.deg)
assert "..." in bigarrangle.__str__()
def test_repr_latex():
"""
Check the _repr_latex_ method, used primarily by IPython notebooks
"""
# try with both scalar
scangle = Angle(2.1, u.deg)
rlscangle = scangle._repr_latex_()
# and array angles
arrangle = Angle([1, 2.1], u.deg)
rlarrangle = arrangle._repr_latex_()
assert rlscangle == r"$2^\circ06{}^\prime00{}^{\prime\prime}$"
assert rlscangle.split("$")[1] in rlarrangle
# make sure the ... appears for large arrays
bigarrangle = Angle(np.ones(50000) / 50000.0, u.deg)
assert "..." in bigarrangle._repr_latex_()
def test_angle_with_cds_units_enabled():
"""Regression test for #5350
Especially the example in
https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
"""
# the problem is with the parser, so remove it temporarily
from astropy.coordinates.angle_formats import _AngleParser
from astropy.units import cds
del _AngleParser._thread_local._parser
with cds.enable():
Angle("5d")
del _AngleParser._thread_local._parser
Angle("5d")
def test_longitude_nan():
# Check that passing a NaN to Longitude doesn't raise a warning
Longitude([0, np.nan, 1] * u.deg)
def test_latitude_nan():
# Check that passing a NaN to Latitude doesn't raise a warning
Latitude([0, np.nan, 1] * u.deg)
def test_angle_wrap_at_nan():
# Check that no attempt is made to wrap a NaN angle
angle = Angle([0, np.nan, 1] * u.deg)
angle.flags.writeable = False # to force an error if a write is attempted
angle.wrap_at(180 * u.deg, inplace=True)
def test_angle_multithreading():
"""
Regression test for issue #7168
"""
angles = ["00:00:00"] * 10000
def parse_test(i=0):
Angle(angles, unit="hour")
for i in range(10):
threading.Thread(target=parse_test, args=(i,)).start()
@pytest.mark.parametrize("cls", [Angle, Longitude, Latitude])
@pytest.mark.parametrize(
"input, expstr, exprepr",
[
(np.nan * u.deg, "nan", "nan deg"),
([np.nan, 5, 0] * u.deg, "[nan 5d00m00s 0d00m00s]", "[nan, 5., 0.] deg"),
([6, np.nan, 0] * u.deg, "[6d00m00s nan 0d00m00s]", "[6., nan, 0.] deg"),
([np.nan, np.nan, np.nan] * u.deg, "[nan nan nan]", "[nan, nan, nan] deg"),
(np.nan * u.hour, "nan", "nan hourangle"),
([np.nan, 5, 0] * u.hour, "[nan 5h00m00s 0h00m00s]", "[nan, 5., 0.] hourangle"),
([6, np.nan, 0] * u.hour, "[6h00m00s nan 0h00m00s]", "[6., nan, 0.] hourangle"),
(
[np.nan, np.nan, np.nan] * u.hour,
"[nan nan nan]",
"[nan, nan, nan] hourangle",
),
(np.nan * u.rad, "nan", "nan rad"),
([np.nan, 1, 0] * u.rad, "[nan 1rad 0rad]", "[nan, 1., 0.] rad"),
([1.50, np.nan, 0] * u.rad, "[1.5rad nan 0rad]", "[1.5, nan, 0.] rad"),
([np.nan, np.nan, np.nan] * u.rad, "[nan nan nan]", "[nan, nan, nan] rad"),
],
)
def test_str_repr_angles_nan(cls, input, expstr, exprepr):
"""
Regression test for issue #11473
"""
q = cls(input)
assert str(q) == expstr
# Delete whitespace, since repr appears to add it for some values,
# which would otherwise make the test fail.
assert repr(q).replace(" ", "") == f"<{cls.__name__}{exprepr}>".replace(" ", "")
@pytest.mark.parametrize("sign", (-1, 1))
@pytest.mark.parametrize(
"value,expected_value,dtype,expected_dtype",
[
(np.pi / 2, np.pi / 2, None, np.float64),
(np.pi / 2, np.pi / 2, np.float64, np.float64),
(np.float32(np.pi / 2), np.float32(np.pi / 2), None, np.float32),
(np.float32(np.pi / 2), np.float32(np.pi / 2), np.float32, np.float32),
# these cases would require coercing the float32 value to the float64 value
# making validate have side effects, so it's not implemented for now
# (np.float32(np.pi / 2), np.pi / 2, np.float64, np.float64),
# (np.float32(-np.pi / 2), -np.pi / 2, np.float64, np.float64),
],
)
def test_latitude_limits(value, expected_value, dtype, expected_dtype, sign):
"""
Test that the validation of the Latitude value range in radians works
in both float32 and float64.
As discussed in issue #13708, previously the float32 representation of pi/2
was rejected as invalid because the comparison always used the float64
representation.
"""
# this prevents upcasting to float64 as sign * value would do
if sign < 0:
value = -value
expected_value = -expected_value
result = Latitude(value, u.rad, dtype=dtype)
assert result.value == expected_value
assert result.dtype == expected_dtype
assert result.unit == u.rad
@pytest.mark.parametrize(
"value,dtype",
[
(0.50001 * np.pi, np.float32),
(np.float32(0.50001 * np.pi), np.float32),
(0.50001 * np.pi, np.float64),
],
)
def test_latitude_out_of_limits(value, dtype):
"""
Test that values slightly larger than pi/2 are rejected for different dtypes.
Test cases for issue #13708
"""
with pytest.raises(ValueError, match=r"Latitude angle\(s\) must be within.*"):
Latitude(value, u.rad, dtype=dtype)
def test_angle_pickle_to_string():
"""
Ensure that after pickling we can still do to_string on hourangle.
Regression test for gh-13923.
"""
angle = Angle(0.25 * u.hourangle)
expected = angle.to_string()
via_pickle = pickle.loads(pickle.dumps(angle))
via_pickle_string = via_pickle.to_string() # This used to fail.
assert via_pickle_string == expected
|
ce35e0c92eb2e8bef7241047d72448dfafc45fe316dc9a73d2bf043171a5798f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the projected separation stuff
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import Angle, Distance
from astropy.coordinates.builtin_frames import FK5, ICRS, Galactic
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
# lon1, lat1, lon2, lat2 in degrees
coords = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
(0, 0, 10, 0),
(0, 0, 90, 0),
(0, 0, 180, 0),
(0, 45, 0, -45),
(0, 60, 0, -30),
(-135, -15, 45, 15),
(100, -89, -80, 89),
(0, 0, 0, 0),
(0, 0, 1.0 / 60.0, 1.0 / 60.0),
]
correct_seps = [1, 1, 1, 1, 10, 90, 180, 90, 90, 180, 180, 0, 0.023570225877234643]
correctness_margin = 2e-10
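# the last expected separation is the great-circle diagonal of a 1-arcmin
# square, a hair under the flat-space value sqrt(2)/60 deg ~= 0.0235702260 deg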
def test_angsep():
"""
Tests that the angular separation object also behaves correctly.
"""
from astropy.coordinates.angle_utilities import angular_separation
# check it both works with floats in radians, Quantities, or Angles
for conv in (np.deg2rad, lambda x: u.Quantity(x, "deg"), lambda x: Angle(x, "deg")):
for (lon1, lat1, lon2, lat2), corrsep in zip(coords, correct_seps):
angsep = angular_separation(conv(lon1), conv(lat1), conv(lon2), conv(lat2))
assert np.fabs(angsep - conv(corrsep)) < conv(correctness_margin)
def test_fk5_seps():
"""
This tests if `separation` works for FK5 objects.
This is a regression test for github issue #891
"""
a = FK5(1.0 * u.deg, 1.0 * u.deg)
b = FK5(2.0 * u.deg, 2.0 * u.deg)
a.separation(b)
def test_proj_separations():
"""
Test angular separation functionality
"""
c1 = ICRS(ra=0 * u.deg, dec=0 * u.deg)
c2 = ICRS(ra=0 * u.deg, dec=1 * u.deg)
sep = c2.separation(c1)
# returns an Angle object
assert isinstance(sep, Angle)
assert_allclose(sep.degree, 1.0)
assert_allclose(sep.arcminute, 60.0)
# these operations have ambiguous interpretations for points on a sphere
with pytest.raises(TypeError):
c1 + c2
with pytest.raises(TypeError):
c1 - c2
ngp = Galactic(l=0 * u.degree, b=90 * u.degree)
ncp = ICRS(ra=0 * u.degree, dec=90 * u.degree)
# if there is a defined conversion between the relevant coordinate systems,
# it will be automatically performed to get the right angular separation
assert_allclose(
ncp.separation(ngp.transform_to(ICRS())).degree, ncp.separation(ngp).degree
)
# distance from the north galactic pole to celestial pole
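# (equivalently, 90 deg minus the J2000 declination of the NGP, ~ +27.13 deg)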
assert_allclose(ncp.separation(ngp.transform_to(ICRS())).degree, 62.87174758503201)
def test_3d_separations():
"""
Test 3D separation functionality
"""
c1 = ICRS(ra=1 * u.deg, dec=1 * u.deg, distance=9 * u.kpc)
c2 = ICRS(ra=1 * u.deg, dec=1 * u.deg, distance=10 * u.kpc)
sep3d = c2.separation_3d(c1)
assert isinstance(sep3d, Distance)
assert_allclose(sep3d - 1 * u.kpc, 0 * u.kpc, atol=1e-12 * u.kpc)
|
438ed2fb23b4dfeb4284133c0e8d0e2caeb63287f48924871fe5ed8304ca3cf4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is the APE5 coordinates API document re-written to work as a series of test
functions.
Note that new tests for coordinates functionality should generally *not* be
added to this file - instead, add them to other appropriate test modules in
this package, like ``test_sky_coord.py``, ``test_frames.py``, or
``test_representation.py``. This file is instead meant mainly to keep track of
deviations from the original APE5 plan.
"""
import numpy as np
import pytest
from numpy import testing as npt
from astropy import coordinates as coords
from astropy import time
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_representations_api():
from astropy.coordinates import Angle, Distance, Latitude, Longitude
from astropy.coordinates.representation import (
CartesianRepresentation,
PhysicsSphericalRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
)
# <-----------------Classes for representation of coordinate data-------------->
# These classes inherit from a common base class and internally contain Quantity
# objects, which are arrays (although they may act as scalars, like numpy's
# length-0 "arrays")
# They can be initialized with a variety of ways that make intuitive sense.
# Distance is optional.
UnitSphericalRepresentation(lon=8 * u.hour, lat=5 * u.deg)
UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
# In the initial implementation, the lat/lon/distance arguments to the
# initializer must be in order. A *possible* future change will be to allow
# smarter guessing of the order. E.g. `Latitude` and `Longitude` objects can be
# given in any order.
UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
SphericalRepresentation(
Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc)
)
# Arrays of any of the inputs are fine
UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg)
# Default is to copy arrays, but optionally, it can be a reference
UnitSphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, copy=False
)
# strings are parsed by `Latitude` and `Longitude` constructors, so no need to
# implement parsing in the Representation classes
UnitSphericalRepresentation(lon=Angle("2h6m3.3s"), lat=Angle("0.1rad"))
# Or, you can give `Quantity`s with keywords, and they will be internally
# converted to Angle/Distance
c1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
# Can also construct from another representation object via `from_representation`.
c2 = SphericalRepresentation.from_representation(c1)
# distance, lat, and lon typically will just match in shape
SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=[10, 11] * u.kpc
)
# if the inputs are not the same, if possible they will be broadcast following
# numpy's standard broadcasting rules.
c2 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=10 * u.kpc
)
assert len(c2.distance) == 2
# when they can't be broadcast, it is a ValueError (same as Numpy)
with pytest.raises(ValueError):
c2 = UnitSphericalRepresentation(
lon=[8, 9, 10] * u.hourangle, lat=[5, 6] * u.deg
)
# It's also possible to pass in scalar quantity lists with mixed units. These
# are converted to array quantities following the same rule as `Quantity`: all
# elements are converted to match the first element's units.
c2 = UnitSphericalRepresentation(
lon=Angle([8 * u.hourangle, 135 * u.deg]),
lat=Angle([5 * u.deg, (6 * np.pi / 180) * u.rad]),
)
assert c2.lat.unit == u.deg and c2.lon.unit == u.hourangle
npt.assert_almost_equal(c2.lon[1].value, 9)
# The Quantity initializer itself can also be used to force the unit even if the
# first element doesn't have the right unit
lon = u.Quantity([120 * u.deg, 135 * u.deg], u.hourangle)
lat = u.Quantity([(5 * np.pi / 180) * u.rad, 0.4 * u.hourangle], u.deg)
c2 = UnitSphericalRepresentation(lon, lat)
# regardless of how input, the `lat` and `lon` come out as angle/distance
assert isinstance(c1.lat, Angle)
assert isinstance(
c1.lat, Latitude
) # `Latitude` is an `~astropy.coordinates.Angle` subclass
assert isinstance(c1.distance, Distance)
# but they are read-only, as representations are immutable once created
with pytest.raises(AttributeError):
c1.lat = Latitude(5, u.deg)
# Note that it is still possible to modify the array in-place, but this is not
# sanctioned by the API, as this would prevent things like caching.
c2.lat[:] = [0] * u.deg # possible, but NOT SUPPORTED
# To address the fact that there are various other conventions for how spherical
# coordinates are defined, other conventions can be included as new classes.
# Later there may be other conventions that we implement - for now just the
# physics convention, as it is one of the most common cases.
_ = PhysicsSphericalRepresentation(phi=120 * u.deg, theta=85 * u.deg, r=3 * u.kpc)
# first dimension must be length-3 if a lone `Quantity` is passed in.
c1 = CartesianRepresentation(np.random.randn(3, 100) * u.kpc)
assert c1.xyz.shape[0] == 3
assert c1.xyz.unit == u.kpc
assert c1.x.shape[0] == 100
assert c1.y.shape[0] == 100
assert c1.z.shape[0] == 100
# can also give each as separate keywords
CartesianRepresentation(
x=np.random.randn(100) * u.kpc,
y=np.random.randn(100) * u.kpc,
z=np.random.randn(100) * u.kpc,
)
# if the units don't match but are all distances, they will automatically be
# converted to match `x`
xarr, yarr, zarr = np.random.randn(3, 100)
c1 = CartesianRepresentation(x=xarr * u.kpc, y=yarr * u.kpc, z=zarr * u.kpc)
c2 = CartesianRepresentation(x=xarr * u.kpc, y=yarr * u.kpc, z=zarr * u.pc)
assert c1.xyz.unit == c2.xyz.unit == u.kpc
assert_allclose((c1.z / 1000) - c2.z, 0 * u.kpc, atol=1e-10 * u.kpc)
# representations convert into other representations via `represent_as`
srep = SphericalRepresentation(lon=90 * u.deg, lat=0 * u.deg, distance=1 * u.pc)
crep = srep.represent_as(CartesianRepresentation)
assert_allclose(crep.x, 0 * u.pc, atol=1e-10 * u.pc)
assert_allclose(crep.y, 1 * u.pc, atol=1e-10 * u.pc)
assert_allclose(crep.z, 0 * u.pc, atol=1e-10 * u.pc)
# The functions that actually do the conversion are defined via methods on the
# representation classes. This may later be expanded into a full registerable
# transform graph like the coordinate frames, but initially it will be a simpler
# method system
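# As a minimal illustrative check of that machinery (an addition, not part of
# the original APE5 text): converting back from cartesian recovers the
# original spherical coordinates.
srep2 = crep.represent_as(SphericalRepresentation)
assert_allclose(srep2.lon, 90 * u.deg)
assert_allclose(srep2.lat, 0 * u.deg, atol=1e-10 * u.deg)
assert_allclose(srep2.distance, 1 * u.pc)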
def test_frame_api():
from astropy.coordinates.builtin_frames import FK5, ICRS
from astropy.coordinates.representation import (
SphericalRepresentation,
UnitSphericalRepresentation,
)
# <--------------------Reference Frame/"Low-level" classes--------------------->
# The low-level classes have a dual role: they act as specifiers of coordinate
# frames and they *may* also contain data as one of the representation objects,
# in which case they are the actual coordinate objects themselves.
# They can always accept a representation as a first argument
icrs = ICRS(UnitSphericalRepresentation(lon=8 * u.hour, lat=5 * u.deg))
# which is stored as the `data` attribute
assert icrs.data.lat == 5 * u.deg
assert icrs.data.lon == 8 * u.hourangle
# Frames that require additional information like equinoxes or obstimes get them
# as keyword parameters to the frame constructor. Where sensible, defaults are
# used. E.g., FK5 is almost always J2000 equinox
fk5 = FK5(UnitSphericalRepresentation(lon=8 * u.hour, lat=5 * u.deg))
J2000 = time.Time("J2000")
fk5_2000 = FK5(
UnitSphericalRepresentation(lon=8 * u.hour, lat=5 * u.deg), equinox=J2000
)
assert fk5.equinox == fk5_2000.equinox
# the information required to specify the frame is immutable
J2001 = time.Time("J2001")
with pytest.raises(AttributeError):
fk5.equinox = J2001
# Similar for the representation data.
with pytest.raises(AttributeError):
fk5.data = UnitSphericalRepresentation(lon=8 * u.hour, lat=5 * u.deg)
# There is also a class-level attribute that lists the attributes needed to
# identify the frame. These include attributes like `equinox` shown above.
assert all(nm in ("equinox", "obstime") for nm in fk5.frame_attributes)
# the `frame_attributes` are relied on particularly in the
# high-level class (discussed below) to allow round-tripping between various
# frames. They are also part of the public API for other similar developer /
# advanced-user needs.
# The actual position information is accessed via the representation objects
assert_allclose(icrs.represent_as(SphericalRepresentation).lat, 5 * u.deg)
# shorthand for the above
assert_allclose(icrs.spherical.lat, 5 * u.deg)
assert icrs.cartesian.z.value > 0
# Many frames have a "default" representation, the one in which they are
# conventionally described, often with a special name for some of the
# coordinates. E.g., most equatorial coordinate systems are spherical with RA and
# Dec. This works simply as a shorthand for the longer form above
assert_allclose(icrs.dec, 5 * u.deg)
assert_allclose(fk5.ra, 8 * u.hourangle)
assert icrs.representation_type == SphericalRepresentation
# low-level classes can also be initialized with names valid for that representation
# and frame:
icrs_2 = ICRS(ra=8 * u.hour, dec=5 * u.deg, distance=1 * u.kpc)
assert_allclose(icrs.ra, icrs_2.ra)
# and these are taken as the default if keywords are not given:
# icrs_nokwarg = ICRS(8*u.hour, 5*u.deg, distance=1*u.kpc)
# assert icrs_nokwarg.ra == icrs_2.ra and icrs_nokwarg.dec == icrs_2.dec
# they also are capable of computing on-sky or 3d separations from each other,
# which will be a direct port of the existing methods:
coo1 = ICRS(ra=0 * u.hour, dec=0 * u.deg)
coo2 = ICRS(ra=0 * u.hour, dec=1 * u.deg)
# `separation` is the on-sky separation
assert_allclose(coo1.separation(coo2).degree, 1.0)
# while `separation_3d` includes the 3D distance information
coo3 = ICRS(ra=0 * u.hour, dec=0 * u.deg, distance=1 * u.kpc)
coo4 = ICRS(ra=0 * u.hour, dec=0 * u.deg, distance=2 * u.kpc)
assert coo3.separation_3d(coo4).kpc == 1.0
# The next example fails because `coo1` and `coo2` don't have distances
with pytest.raises(ValueError):
assert coo1.separation_3d(coo2).kpc == 1.0
# repr/str also shows info, with frame and data
# assert repr(fk5) == ''
def test_transform_api():
from astropy.coordinates.baseframe import BaseCoordinateFrame, frame_transform_graph
from astropy.coordinates.builtin_frames import FK5, ICRS
from astropy.coordinates.representation import UnitSphericalRepresentation
from astropy.coordinates.transformations import DynamicMatrixTransform
# <------------------------Transformations------------------------------------->
# Transformation functionality is the key to the whole scheme: they transform
# low-level classes from one frame to another.
# (used below but defined above in the API)
fk5 = FK5(ra=8 * u.hour, dec=5 * u.deg)
# If no data (or `None`) is given, the class acts as a specifier of a frame, but
# without any stored data.
J2001 = time.Time("J2001")
fk5_J2001_frame = FK5(equinox=J2001)
# if they do not have data, the repr instead shows the frame specification
assert repr(fk5_J2001_frame) == "<FK5 Frame (equinox=J2001.000)>"
# Note that, although a frame object is immutable and can't have data added, it
# can be used to create a new object that does have data by giving the
# `realize_frame` method a representation:
srep = UnitSphericalRepresentation(lon=8 * u.hour, lat=5 * u.deg)
fk5_j2001_with_data = fk5_J2001_frame.realize_frame(srep)
assert fk5_j2001_with_data.data is not None
# Now `fk5_j2001_with_data` is in the same frame as `fk5_J2001_frame`, but it
# is an actual low-level coordinate, rather than a frame without data.
# These frames are primarily useful for specifying what a coordinate should be
# transformed *into*, as they are used by the `transform_to` method
# E.g., this snippet precesses the point to the new equinox
newfk5 = fk5.transform_to(fk5_J2001_frame)
assert newfk5.equinox == J2001
# transforming to a new frame necessarily loses framespec information if that
# information is not applicable to the new frame. This means transforms are not
# always round-trippable:
fk5_2 = FK5(ra=8 * u.hour, dec=5 * u.deg, equinox=J2001)
ic_trans = fk5_2.transform_to(ICRS())
# `ic_trans` does not have an `equinox`, so now when we transform back to FK5,
# it's a *different* RA and Dec
fk5_trans = ic_trans.transform_to(FK5())
assert not allclose(fk5_2.ra, fk5_trans.ra, rtol=0, atol=1e-10 * u.deg)
# But if you explicitly give the right equinox, all is fine
fk5_trans_2 = fk5_2.transform_to(FK5(equinox=J2001))
assert_allclose(fk5_2.ra, fk5_trans_2.ra, rtol=0, atol=1e-10 * u.deg)
# Trying to transform a frame with no data is of course an error:
with pytest.raises(ValueError):
FK5(equinox=J2001).transform_to(ICRS())
# To actually define a new transformation, the same scheme as in the
# 0.2/0.3 coordinates framework can be re-used - a graph of transform functions
# connecting various coordinate classes together. The main changes are:
# 1) The transform functions now get the frame object they are transforming the
# current data into.
# 2) Frames with additional information need to have a way to transform between
# objects of the same class, but with different framespecinfo values
# An example transform function:
class SomeNewSystem(BaseCoordinateFrame):
pass
@frame_transform_graph.transform(DynamicMatrixTransform, SomeNewSystem, FK5)
def new_to_fk5(newobj, fk5frame):
_ = newobj.obstime
_ = fk5frame.equinox
# ... here one would build a *cartesian* transform matrix that takes the
# `newobj` frame as observed at `newobj.obstime` to FK5 at equinox
# `fk5frame.equinox`
matrix = np.eye(3)
return matrix
# Other options for transform functions include one that simply returns the new
# coordinate object, and one that returns a cartesian matrix but does *not*
# require `newobj` or `fk5frame` - this allows optimization of the transform.
def test_highlevel_api():
J2001 = time.Time("J2001")
# <--------------------------"High-level" class-------------------------------->
# The "high-level" class is intended to wrap the lower-level classes in such a
# way that they can be round-tripped, as well as providing a variety of
# convenience functionality. This document is not intended to show *all* of the
# possible high-level functionality, rather how the high-level classes are
# initialized and interact with the low-level classes
# this creates an object that contains an `ICRS` low-level class, initialized
# identically to the first ICRS example further up.
sc = coords.SkyCoord(
coords.SphericalRepresentation(
lon=8 * u.hour, lat=5 * u.deg, distance=1 * u.kpc
),
frame="icrs",
)
# Other representations and `system` keywords delegate to the appropriate
# low-level class. The already-existing registry for user-defined coordinates
# will be used by `SkyCoordinate` to figure out what the various values of the
# `system` keyword actually mean.
sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame="icrs")
sc = coords.SkyCoord(l=120 * u.deg, b=5 * u.deg, frame="galactic")
# High-level classes can also be initialized directly from low-level objects
sc = coords.SkyCoord(coords.ICRS(ra=8 * u.hour, dec=5 * u.deg))
# The next example raises an error because the high-level class must always
# have position data.
with pytest.raises(ValueError):
sc = coords.SkyCoord(coords.FK5(equinox=J2001)) # raises ValueError
# similarly, the low-level object can always be accessed
# this is how it's supposed to look, but sometimes the numbers get rounded in
# funny ways
# assert repr(sc.frame) == '<ICRS Coordinate: ra=120.0 deg, dec=5.0 deg>'
rscf = repr(sc.frame)
assert rscf.startswith("<ICRS Coordinate: (ra, dec) in deg")
# and the string representation will be inherited from the low-level class.
# same deal, should look like this, but different architectures / python
# versions may round the numbers differently
# assert repr(sc) == '<SkyCoord (ICRS): ra=120.0 deg, dec=5.0 deg>'
rsc = repr(sc)
assert rsc.startswith("<SkyCoord (ICRS): (ra, dec) in deg")
# Supports a variety of possible complex string formats
sc = coords.SkyCoord("8h00m00s +5d00m00.0s", frame="icrs")
# In the next example, the unit is only needed b/c units are ambiguous. In
# general, we *never* accept ambiguity
sc = coords.SkyCoord("8:00:00 +5:00:00.0", unit=(u.hour, u.deg), frame="icrs")
# The next one would yield length-2 array coordinates, because of the comma
sc = coords.SkyCoord(["8h 5d", "2°2′3″ 0.3rad"], frame="icrs")
# It should also interpret common designation styles as a coordinate
# NOT YET
# sc = coords.SkyCoord('SDSS J123456.89-012345.6', frame='icrs')
# but it should also be possible to provide formats for outputting to strings,
# similar to `Time`. This can be added right away or at a later date.
# transformation is done the same as for low-level classes, which it delegates to
sc_fk5_j2001 = sc.transform_to(coords.FK5(equinox=J2001))
assert sc_fk5_j2001.equinox == J2001
# The key difference is that the high-level class remembers frame information
# necessary for round-tripping, unlike the low-level classes:
sc1 = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, equinox=J2001, frame="fk5")
sc2 = sc1.transform_to("icrs")
# The next assertion succeeds, but it doesn't mean anything for ICRS, as ICRS
# isn't defined in terms of an equinox
assert sc2.equinox == J2001
# But it *is* necessary once we transform to FK5
sc3 = sc2.transform_to("fk5")
assert sc3.equinox == J2001
assert_allclose(sc1.ra, sc3.ra)
# `SkyCoord` will also include the attribute-style access that is in the
# v0.2/0.3 coordinate objects. This will *not* be in the low-level classes
sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame="icrs")
scgal = sc.galactic
assert str(scgal).startswith("<SkyCoord (Galactic): (l, b)")
# the existing `from_name` and `match_to_catalog_*` methods will be moved to the
# high-level class as convenience functionality.
# in remote-data test below!
# m31icrs = coords.SkyCoord.from_name('M31', frame='icrs')
# assert str(m31icrs) == '<SkyCoord (ICRS) RA=10.68471 deg, Dec=41.26875 deg>'
if HAS_SCIPY:
cat1 = coords.SkyCoord(
ra=[1, 2] * u.hr,
dec=[3, 4.01] * u.deg,
distance=[5, 6] * u.kpc,
frame="icrs",
)
cat2 = coords.SkyCoord(
ra=[1, 2, 2.01] * u.hr,
dec=[3, 4, 5] * u.deg,
distance=[5, 200, 6] * u.kpc,
frame="icrs",
)
idx1, sep2d1, dist3d1 = cat1.match_to_catalog_sky(cat2)
idx2, sep2d2, dist3d2 = cat1.match_to_catalog_3d(cat2)
assert np.any(idx1 != idx2)
# additional convenience functionality for the future should be added as methods
# on `SkyCoord`, *not* the low-level classes.
@pytest.mark.remote_data
def test_highlevel_api_remote():
m31icrs = coords.SkyCoord.from_name("M31", frame="icrs")
m31str = str(m31icrs)
assert m31str.startswith("<SkyCoord (ICRS): (ra, dec) in deg\n (")
assert m31str.endswith(")>")
assert "10.68" in m31str
assert "41.26" in m31str
# The above is essentially a replacement of the below, but tweaked so that
# small/moderate changes in what `from_name` returns don't cause the tests
# to fail
# assert str(m31icrs) == '<SkyCoord (ICRS): (ra, dec) in deg\n (10.6847083, 41.26875)>'
m31fk4 = coords.SkyCoord.from_name("M31", frame="fk4")
assert not m31icrs.is_equivalent_frame(m31fk4)
assert np.abs(m31icrs.ra - m31fk4.ra) > 0.5 * u.deg
|
337cc6906596c541c65d6f83b2d067ed767c8881cda6cbf08880002e63ac97fe | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for putting velocity differentials into SkyCoord objects.
Note: the skyoffset velocity tests are in a different file, in
test_skyoffset_transformations.py
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
ICRS,
CartesianDifferential,
CartesianRepresentation,
Galactic,
PrecessedGeocentric,
RadialDifferential,
SkyCoord,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_creation_frameobjs():
i = ICRS(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=0.2 * u.mas / u.yr, pm_dec=0.1 * u.mas / u.yr
)
sc = SkyCoord(i)
for attrnm in ["ra", "dec", "pm_ra_cosdec", "pm_dec"]:
assert_quantity_allclose(getattr(i, attrnm), getattr(sc, attrnm))
sc_nod = SkyCoord(ICRS(1 * u.deg, 2 * u.deg))
for attrnm in ["ra", "dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_nod, attrnm))
def test_creation_attrs():
sc1 = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra_cosdec=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
frame="fk5",
)
assert_quantity_allclose(sc1.ra, 1 * u.deg)
assert_quantity_allclose(sc1.dec, 2 * u.deg)
assert_quantity_allclose(sc1.pm_ra_cosdec, 0.2 * u.arcsec / u.kyr)
assert_quantity_allclose(sc1.pm_dec, 0.1 * u.arcsec / u.kyr)
sc2 = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
differential_type=SphericalDifferential,
)
assert_quantity_allclose(sc2.ra, 1 * u.deg)
assert_quantity_allclose(sc2.dec, 2 * u.deg)
assert_quantity_allclose(sc2.pm_ra, 0.2 * u.arcsec / u.kyr)
assert_quantity_allclose(sc2.pm_dec, 0.1 * u.arcsec / u.kyr)
sc3 = SkyCoord(
"1:2:3 4:5:6",
pm_ra_cosdec=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
unit=(u.hour, u.deg),
)
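# "1:2:3" in hours is 1h02m03s; each minute/second of time is 15 arcmin/arcsec
# of angle, hence the factors of 15 in the expected RA below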
assert_quantity_allclose(
sc3.ra, 1 * u.hourangle + 2 * u.arcmin * 15 + 3 * u.arcsec * 15
)
assert_quantity_allclose(sc3.dec, 4 * u.deg + 5 * u.arcmin + 6 * u.arcsec)
# might as well check with sillier units?
assert_quantity_allclose(
sc3.pm_ra_cosdec, 1.2776637006616473e-07 * u.arcmin / u.fortnight
)
assert_quantity_allclose(sc3.pm_dec, 6.388318503308237e-08 * u.arcmin / u.fortnight)
def test_creation_copy_basic():
i = ICRS(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=0.2 * u.mas / u.yr, pm_dec=0.1 * u.mas / u.yr
)
sc = SkyCoord(i)
sc_cpy = SkyCoord(sc)
for attrnm in ["ra", "dec", "pm_ra_cosdec", "pm_dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
def test_creation_copy_rediff():
sc = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
differential_type=SphericalDifferential,
)
sc_cpy = SkyCoord(sc)
for attrnm in ["ra", "dec", "pm_ra", "pm_dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
sc_newdiff = SkyCoord(sc, differential_type=SphericalCosLatDifferential)
reprepr = sc.represent_as(SphericalRepresentation, SphericalCosLatDifferential)
assert_quantity_allclose(
sc_newdiff.pm_ra_cosdec, reprepr.differentials["s"].d_lon_coslat
)
def test_creation_cartesian():
rep = CartesianRepresentation([10, 0.0, 0.0] * u.pc)
dif = CartesianDifferential([0, 100, 0.0] * u.pc / u.Myr)
rep = rep.with_differentials(dif)
c = SkyCoord(rep)
sdif = dif.represent_as(SphericalCosLatDifferential, rep)
assert_quantity_allclose(c.pm_ra_cosdec, sdif.d_lon_coslat)
def test_useful_error_missing():
sc_nod = SkyCoord(ICRS(1 * u.deg, 2 * u.deg))
try:
sc_nod.l
except AttributeError as e:
# this is double-checking the *normal* behavior
msg_l = e.args[0]
try:
sc_nod.pm_dec
except Exception as e:
msg_pm_dec = e.args[0]
assert "has no attribute" in msg_l
assert "has no associated differentials" in msg_pm_dec
# ----------------------Operations on SkyCoords w/ velocities-------------------
# define some fixtures to get baseline coordinates to try operations with
@pytest.fixture(
scope="module", params=[(False, False), (True, False), (False, True), (True, True)]
)
def sc(request):
incldist, inclrv = request.param
args = [1 * u.deg, 2 * u.deg]
kwargs = dict(pm_dec=1 * u.mas / u.yr, pm_ra_cosdec=2 * u.mas / u.yr)
if incldist:
kwargs["distance"] = 213.4 * u.pc
if inclrv:
kwargs["radial_velocity"] = 61 * u.km / u.s
return SkyCoord(*args, **kwargs)
@pytest.fixture(scope="module")
def scmany():
return SkyCoord(
ICRS(
ra=[1] * 100 * u.deg,
dec=[2] * 100 * u.deg,
pm_ra_cosdec=np.random.randn(100) * u.mas / u.yr,
pm_dec=np.random.randn(100) * u.mas / u.yr,
)
)
@pytest.fixture(scope="module")
def sc_for_sep():
return SkyCoord(
1 * u.deg, 2 * u.deg, pm_dec=1 * u.mas / u.yr, pm_ra_cosdec=2 * u.mas / u.yr
)
def test_separation(sc, sc_for_sep):
sc.separation(sc_for_sep)
def test_accessors(sc, scmany):
sc.data.differentials["s"]
sph = sc.spherical
gal = sc.galactic
if sc.data.get_name().startswith("unit") and not sc.data.differentials[
"s"
].get_name().startswith("unit"):
# this xfail can be eliminated when issue #7028 is resolved
pytest.xfail(".velocity fails if there is an RV but not distance")
sc.velocity
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
scmany[0]
sph = scmany.spherical
gal = scmany.galactic
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
def test_transforms(sc):
trans = sc.transform_to("galactic")
assert isinstance(trans.frame, Galactic)
def test_transforms_diff(sc):
# note that arguably this *should* fail for the no-distance cases: 3D
# information is necessary to truly solve this, hence the xfail
if not sc.distance.unit.is_equivalent(u.m):
pytest.xfail("Should fail for no-distance cases")
else:
trans = sc.transform_to(PrecessedGeocentric(equinox="B1975"))
assert isinstance(trans.frame, PrecessedGeocentric)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_matching(sc, scmany):
# just check that it works and yields something
idx, d2d, d3d = sc.match_to_catalog_sky(scmany)
def test_position_angle(sc, sc_for_sep):
sc.position_angle(sc_for_sep)
def test_constellations(sc):
const = sc.get_constellation()
assert const == "Pisces"
def test_separation_3d_with_differentials():
c1 = SkyCoord(
ra=138 * u.deg,
dec=-17 * u.deg,
distance=100 * u.pc,
pm_ra_cosdec=5 * u.mas / u.yr,
pm_dec=-7 * u.mas / u.yr,
radial_velocity=160 * u.km / u.s,
)
c2 = SkyCoord(
ra=138 * u.deg,
dec=-17 * u.deg,
distance=105 * u.pc,
pm_ra_cosdec=15 * u.mas / u.yr,
pm_dec=-74 * u.mas / u.yr,
radial_velocity=-60 * u.km / u.s,
)
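# Both coordinates point in the same sky direction, so the 3D separation is
# just the difference in distance, 105 pc - 100 pc = 5 pc; the differing
# proper motions and radial velocities do not enter the positional separation.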
sep = c1.separation_3d(c2)
assert_quantity_allclose(sep, 5 * u.pc)
@pytest.mark.parametrize("sph_type", ["spherical", "unitspherical"])
def test_cartesian_to_spherical(sph_type):
"""Conversion to unitspherical should work, even if we lose distance."""
c = SkyCoord(
x=1 * u.kpc,
y=0 * u.kpc,
z=0 * u.kpc,
v_x=10 * u.km / u.s,
v_y=0 * u.km / u.s,
v_z=4.74 * u.km / u.s,
representation_type="cartesian",
)
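# 4.74 km/s is the transverse speed of a 1 arcsec/yr proper motion at 1 pc
# (i.e. 1 au/yr), so v_z = 4.74 km/s at 1 kpc corresponds to pm_dec = 1 mas/yr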
c.representation_type = sph_type
assert c.ra == 0
assert c.dec == 0
assert c.pm_ra == 0
assert u.allclose(c.pm_dec, 1 * u.mas / u.yr, rtol=1e-3)
assert c.radial_velocity == 10 * u.km / u.s
if sph_type == "spherical":
assert c.distance == 1 * u.kpc
else:
assert not hasattr(c, "distance")
@pytest.mark.parametrize(
"diff_info, diff_cls",
[
(dict(radial_velocity=[20, 30] * u.km / u.s), RadialDifferential),
(
dict(
pm_ra=[2, 3] * u.mas / u.yr,
pm_dec=[-3, -4] * u.mas / u.yr,
differential_type="unitspherical",
),
UnitSphericalDifferential,
),
(
dict(pm_ra_cosdec=[2, 3] * u.mas / u.yr, pm_dec=[-3, -4] * u.mas / u.yr),
UnitSphericalCosLatDifferential,
),
],
scope="class",
)
class TestDifferentialClassPropagation:
"""Test that going in between spherical and unit-spherical, we do not
change differential type (since both can handle the same types).
"""
def test_sc_unit_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(ra=[10, 20] * u.deg, dec=[-10, 10] * u.deg, **diff_info)
assert isinstance(sc.data, UnitSphericalRepresentation)
assert isinstance(sc.data.differentials["s"], diff_cls)
sr = sc.represent_as("spherical")
assert isinstance(sr, SphericalRepresentation)
assert isinstance(sr.differentials["s"], diff_cls)
def test_sc_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(
ra=[10, 20] * u.deg,
dec=[-10, 10] * u.deg,
distance=1.0 * u.kpc,
**diff_info
)
assert isinstance(sc.data, SphericalRepresentation)
assert isinstance(sc.data.differentials["s"], diff_cls)
sr = sc.represent_as("unitspherical")
assert isinstance(sr, UnitSphericalRepresentation)
assert isinstance(sr.differentials["s"], diff_cls)
|
778091d349da1677da52eed814ee22574c97ad8cd55cfe8583993cef0a666eff | from contextlib import nullcontext
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy import time
from astropy.constants import c
from astropy.coordinates import (
FK5,
GCRS,
ICRS,
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
Galactic,
SkyCoord,
SpectralQuantity,
get_body_barycentric_posvel,
)
from astropy.coordinates.spectral_coordinate import (
SpectralCoord,
_apply_relativistic_doppler_shift,
)
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose, quantity_allclose
from astropy.utils import iers
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning, AstropyWarning
from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES as FITSWCS_VELOCITY_FRAMES
def assert_frame_allclose(
frame1,
frame2,
pos_rtol=1e-7,
pos_atol=1 * u.m,
vel_rtol=1e-7,
vel_atol=1 * u.mm / u.s,
):
# checks that:
# - the positions are equal to within some tolerance (the relative tolerance
# should be dimensionless, the absolute tolerance should be a distance).
# note that these are the tolerances *in 3d*
# - either both or neither frame has velocities, or if one has no velocities
# the other one can have zero velocities
# - if velocities are present, they are equal to some tolerance
# Ideally this should accept both frames and SkyCoords
if hasattr(frame1, "frame"): # SkyCoord-like
frame1 = frame1.frame
if hasattr(frame2, "frame"): # SkyCoord-like
frame2 = frame2.frame
# assert (frame1.data.differentials and frame2.data.differentials or
# (not frame1.data.differentials and not frame2.data.differentials))
assert frame1.is_equivalent_frame(frame2)
frame2_in_1 = frame2.transform_to(frame1)
assert_quantity_allclose(
0 * u.m, frame1.separation_3d(frame2_in_1), rtol=pos_rtol, atol=pos_atol
)
if frame1.data.differentials:
d1 = frame1.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials["s"]
d2 = frame2_in_1.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials["s"]
assert_quantity_allclose(d1.norm(d1), d1.norm(d2), rtol=vel_rtol, atol=vel_atol)
@pytest.fixture(scope="module")
def greenwich_earthlocation(request):
if (
not hasattr(EarthLocation, "_site_registry")
and request.config.getoption("remote_data") == "none"
):
EarthLocation._get_site_registry(force_builtin=True)
return EarthLocation.of_site("Greenwich")
# GENERAL TESTS
# We first run through a series of cases to test different ways of initializing
# the observer and target for SpectralCoord, including for example frames,
# SkyCoords, and making sure that SpectralCoord is not sensitive to the actual
# frame or representation class.
# Local Standard of Rest
LSRD = Galactic(
u=0.1 * u.km,
v=0.1 * u.km,
w=0.1 * u.km,
U=9 * u.km / u.s,
V=12 * u.km / u.s,
W=7 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
LSRD_EQUIV = [
LSRD,
SkyCoord(LSRD), # as a SkyCoord
LSRD.transform_to(ICRS()), # different frame
LSRD.transform_to(ICRS()).transform_to(Galactic()), # different representation
]
@pytest.fixture(params=[None] + LSRD_EQUIV)
def observer(request):
return request.param
# Target located in direction of motion of LSRD with no velocities
LSRD_DIR_STATIONARY = Galactic(
u=9 * u.km, v=12 * u.km, w=7 * u.km, representation_type="cartesian"
)
LSRD_DIR_STATIONARY_EQUIV = [
LSRD_DIR_STATIONARY,
SkyCoord(LSRD_DIR_STATIONARY), # as a SkyCoord
LSRD_DIR_STATIONARY.transform_to(FK5()), # different frame
# different representation
LSRD_DIR_STATIONARY.transform_to(ICRS()).transform_to(Galactic()),
]
@pytest.fixture(params=[None] + LSRD_DIR_STATIONARY_EQUIV)
def target(request):
return request.param
def test_create_spectral_coord_observer_target(observer, target):
with nullcontext() if target is None else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
coord = SpectralCoord([100, 200, 300] * u.nm, observer=observer, target=target)
if observer is None:
assert coord.observer is None
else:
assert_frame_allclose(observer, coord.observer)
if target is None:
assert coord.target is None
else:
assert_frame_allclose(target, coord.target)
assert coord.doppler_rest is None
assert coord.doppler_convention is None
if observer is None or target is None:
assert quantity_allclose(coord.redshift, 0)
assert quantity_allclose(coord.radial_velocity, 0 * u.km / u.s)
elif any(observer is lsrd for lsrd in LSRD_EQUIV) and any(
target is lsrd for lsrd in LSRD_DIR_STATIONARY_EQUIV
):
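# LSRD moves at (9, 12, 7) km/s straight towards the stationary target, so
# the relative radial velocity is -sqrt(9**2 + 12**2 + 7**2) km/s
# = -sqrt(274) km/s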
assert_quantity_allclose(
coord.radial_velocity, -(274**0.5) * u.km / u.s, atol=1e-4 * u.km / u.s
)
assert_quantity_allclose(coord.redshift, -5.5213158163147646e-05, atol=1e-9)
else:
raise NotImplementedError()
def test_create_from_spectral_coord(observer, target):
"""
Checks that parameters are correctly copied to the new SpectralCoord object
"""
with nullcontext() if target is None else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
spec_coord1 = SpectralCoord(
[100, 200, 300] * u.nm,
observer=observer,
target=target,
doppler_convention="optical",
doppler_rest=6000 * u.AA,
)
spec_coord2 = SpectralCoord(spec_coord1)
assert spec_coord1.observer == spec_coord2.observer
assert spec_coord1.target == spec_coord2.target
assert spec_coord1.radial_velocity == spec_coord2.radial_velocity
assert spec_coord1.doppler_convention == spec_coord2.doppler_convention
assert spec_coord1.doppler_rest == spec_coord2.doppler_rest
# INTERNAL FUNCTIONS TESTS
def test_apply_relativistic_doppler_shift():
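# For recession speed v (beta = v / c) the relativistic Doppler factor is
# sqrt((1 - beta) / (1 + beta)); with beta = 0.5, frequencies (and energies
# and wavenumbers) are multiplied by sqrt(1 / 3) and wavelengths by sqrt(3),
# which is what the checks below verify.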
# Frequency
sq1 = SpectralQuantity(1 * u.GHz)
sq2 = _apply_relativistic_doppler_shift(sq1, 0.5 * c)
assert_quantity_allclose(sq2, np.sqrt(1.0 / 3.0) * u.GHz)
# Wavelength
sq3 = SpectralQuantity(500 * u.nm)
sq4 = _apply_relativistic_doppler_shift(sq3, 0.5 * c)
assert_quantity_allclose(sq4, np.sqrt(3) * 500 * u.nm)
# Energy
sq5 = SpectralQuantity(300 * u.eV)
sq6 = _apply_relativistic_doppler_shift(sq5, 0.5 * c)
assert_quantity_allclose(sq6, np.sqrt(1.0 / 3.0) * 300 * u.eV)
# Wavenumber
sq7 = SpectralQuantity(0.01 / u.micron)
sq8 = _apply_relativistic_doppler_shift(sq7, 0.5 * c)
assert_quantity_allclose(sq8, np.sqrt(1.0 / 3.0) * 0.01 / u.micron)
# Velocity (doppler_convention='relativistic')
sq9 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="relativistic", doppler_rest=1 * u.GHz
)
sq10 = _apply_relativistic_doppler_shift(sq9, 300 * u.km / u.s)
assert_quantity_allclose(sq10, 499.999666 * u.km / u.s)
assert sq10.doppler_convention == "relativistic"
# Velocity (doppler_convention='radio')
sq11 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="radio", doppler_rest=1 * u.GHz
)
sq12 = _apply_relativistic_doppler_shift(sq11, 300 * u.km / u.s)
assert_quantity_allclose(sq12, 499.650008 * u.km / u.s)
assert sq12.doppler_convention == "radio"
# Velocity (doppler_convention='optical')
sq13 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="optical", doppler_rest=1 * u.GHz
)
sq14 = _apply_relativistic_doppler_shift(sq13, 300 * u.km / u.s)
assert_quantity_allclose(sq14, 500.350493 * u.km / u.s)
assert sq14.doppler_convention == "optical"
# Velocity - check relativistic velocity addition
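# (two boosts of 0.999c compose relativistically as (v1 + v2) / (1 + v1 * v2 / c**2),
# not as 1.998c, which the final assertion below verifies)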
sq13 = SpectralQuantity(
0 * u.km / u.s, doppler_convention="relativistic", doppler_rest=1 * u.GHz
)
sq14 = _apply_relativistic_doppler_shift(sq13, 0.999 * c)
assert_quantity_allclose(sq14, 0.999 * c)
sq14 = _apply_relativistic_doppler_shift(sq14, 0.999 * c)
assert_quantity_allclose(sq14, (0.999 * 2) / (1 + 0.999**2) * c)
assert sq14.doppler_convention == "relativistic"
# Cases that should raise errors
sq15 = SpectralQuantity(200 * u.km / u.s)
with pytest.raises(ValueError, match="doppler_convention not set"):
_apply_relativistic_doppler_shift(sq15, 300 * u.km / u.s)
sq16 = SpectralQuantity(200 * u.km / u.s, doppler_rest=10 * u.GHz)
with pytest.raises(ValueError, match="doppler_convention not set"):
_apply_relativistic_doppler_shift(sq16, 300 * u.km / u.s)
sq17 = SpectralQuantity(200 * u.km / u.s, doppler_convention="optical")
with pytest.raises(ValueError, match="doppler_rest not set"):
_apply_relativistic_doppler_shift(sq17, 300 * u.km / u.s)
# BASIC TESTS
def test_init_quantity():
sc = SpectralCoord(10 * u.GHz)
assert sc.value == 10.0
assert sc.unit is u.GHz
assert sc.doppler_convention is None
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_spectral_quantity():
sc = SpectralCoord(SpectralQuantity(10 * u.GHz, doppler_convention="optical"))
assert sc.value == 10.0
assert sc.unit is u.GHz
assert sc.doppler_convention == "optical"
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_too_many_args():
with pytest.raises(
ValueError, match="Cannot specify radial velocity or redshift if both"
):
SpectralCoord(
10 * u.GHz,
observer=LSRD,
target=SkyCoord(10, 20, unit="deg"),
radial_velocity=1 * u.km / u.s,
)
with pytest.raises(
ValueError, match="Cannot specify radial velocity or redshift if both"
):
SpectralCoord(
10 * u.GHz, observer=LSRD, target=SkyCoord(10, 20, unit="deg"), redshift=1
)
with pytest.raises(
ValueError, match="Cannot set both a radial velocity and redshift"
):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.km / u.s, redshift=1)
def test_init_wrong_type():
with pytest.raises(
TypeError, match="observer must be a SkyCoord or coordinate frame instance"
):
SpectralCoord(10 * u.GHz, observer=3.4)
with pytest.raises(
TypeError, match="target must be a SkyCoord or coordinate frame instance"
):
SpectralCoord(10 * u.GHz, target=3.4)
with pytest.raises(
u.UnitsError,
match=(
"Argument 'radial_velocity' to function "
"'__new__' must be in units convertible to 'km / s'"
),
):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.kg)
with pytest.raises(
TypeError,
match=(
"Argument 'radial_velocity' to function '__new__' has no 'unit' attribute."
" You should pass in an astropy Quantity instead."
),
):
SpectralCoord(10 * u.GHz, radial_velocity="banana")
with pytest.raises(u.UnitsError, match="redshift should be dimensionless"):
SpectralCoord(10 * u.GHz, redshift=1 * u.m)
with pytest.raises(
TypeError,
match='Cannot parse "banana" as a Quantity. It does not start with a number.',
):
SpectralCoord(10 * u.GHz, redshift="banana")
def test_observer_init_rv_behavior():
"""
Test basic initialization behavior of observer/target and redshift/rv
"""
# Start off by specifying the radial velocity only
sc_init = SpectralCoord([4000, 5000] * u.AA, radial_velocity=100 * u.km / u.s)
assert sc_init.observer is None
assert sc_init.target is None
assert_quantity_allclose(sc_init.radial_velocity, 100 * u.km / u.s)
# Next, set the observer, and check that the radial velocity hasn't changed
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init.observer = ICRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
assert sc_init.observer is not None
assert_quantity_allclose(sc_init.radial_velocity, 100 * u.km / u.s)
# Setting the target should now cause the original radial velocity to be
# dropped in favor of the automatically computed one
sc_init.target = SkyCoord(
CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]),
frame="icrs",
radial_velocity=30 * u.km / u.s,
)
assert sc_init.target is not None
assert_quantity_allclose(sc_init.radial_velocity, 30 * u.km / u.s)
# The observer can only be set if originally None - now that it isn't
# setting it again should fail
with pytest.raises(ValueError, match="observer has already been set"):
sc_init.observer = GCRS(CartesianRepresentation([0 * u.km, 1 * u.km, 0 * u.km]))
# And similarly, changing the target should not be possible
with pytest.raises(ValueError, match="target has already been set"):
sc_init.target = GCRS(CartesianRepresentation([0 * u.km, 1 * u.km, 0 * u.km]))
def test_rv_redshift_initialization():
# Check that setting the redshift sets the radial velocity appropriately,
# and that the redshift can be recovered
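# (for a radial relativistic Doppler shift, 1 + z = sqrt((1 + beta) / (1 - beta)),
# so z = 1 corresponds to beta = 0.6, i.e. v = 0.6 c)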
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=1)
assert isinstance(sc_init.redshift, u.Quantity)
assert_quantity_allclose(sc_init.redshift, 1 * u.dimensionless_unscaled)
assert_quantity_allclose(sc_init.radial_velocity, 0.6 * c)
# Check that setting the same radial velocity produces the same redshift
# and that the radial velocity can be recovered
sc_init2 = SpectralCoord([4000, 5000] * u.AA, radial_velocity=0.6 * c)
assert_quantity_allclose(sc_init2.redshift, 1 * u.dimensionless_unscaled)
assert_quantity_allclose(sc_init2.radial_velocity, 0.6 * c)
# Check that specifying redshift as a quantity works
sc_init3 = SpectralCoord([4000, 5000] * u.AA, redshift=1 * u.one)
assert sc_init.redshift == sc_init3.redshift
# Make sure that both redshift and radial velocity can't be specified at
# the same time.
with pytest.raises(
ValueError, match="Cannot set both a radial velocity and redshift"
):
SpectralCoord([4000, 5000] * u.AA, radial_velocity=10 * u.km / u.s, redshift=2)
def test_replicate():
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_set_rv = sc_init.replicate(redshift=1)
assert_quantity_allclose(sc_set_rv.radial_velocity, 0.6 * c)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
sc_set_rv = sc_init.replicate(radial_velocity=c / 2)
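# beta = 0.5 gives 1 + z = sqrt(1.5 / 0.5) = sqrt(3), hence z = sqrt(3) - 1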
assert_quantity_allclose(sc_set_rv.redshift, np.sqrt(3) - 1)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
gcrs_origin = GCRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init2 = SpectralCoord([4000, 5000] * u.AA, redshift=1, observer=gcrs_origin)
with np.errstate(all="ignore"):
sc_init2.replicate(redshift=0.5)
assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init3 = SpectralCoord([4000, 5000] * u.AA, redshift=1, target=gcrs_origin)
with np.errstate(all="ignore"):
sc_init3.replicate(redshift=0.5)
assert_quantity_allclose(sc_init3, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init4 = SpectralCoord(
[4000, 5000] * u.AA, observer=gcrs_origin, target=gcrs_origin
)
with pytest.raises(
ValueError,
match=(
"Cannot specify radial velocity or redshift if both target and observer are"
" specified"
),
):
sc_init4.replicate(redshift=0.5)
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_init_copy = sc_init.replicate(copy=True)
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_copy, [4000, 5000] * u.AA)
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_init_ref = sc_init.replicate()
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_ref, [6000, 5000] * u.AA)
def test_with_observer_stationary_relative_to():
# Simple tests of with_observer_stationary_relative_to to cover different
# ways of calling it
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc1 = SpectralCoord([4000, 5000] * u.AA)
with pytest.raises(
ValueError,
match=(
"This method can only be used if both observer and target are defined on"
" the SpectralCoord"
),
):
sc1.with_observer_stationary_relative_to("icrs")
sc2 = SpectralCoord(
[4000, 5000] * u.AA,
observer=ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
-1 * u.km / u.s,
0 * u.km / u.s,
-1 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
),
target=ICRS(
0 * u.deg, 45 * u.deg, distance=1 * u.kpc, radial_velocity=2 * u.km / u.s
),
)
# Motion of observer is in opposite direction to target
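# (the line of sight towards ra=0, dec=45 deg is (1, 0, 1) / sqrt(2), so the
# observer velocity (-1, 0, -1) km/s is directed away from the target, adding
# sqrt(2) km/s of recession to the target's own 2 km/s radial velocity)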
assert_quantity_allclose(sc2.radial_velocity, (2 + 2**0.5) * u.km / u.s)
# Change to observer that is stationary in ICRS
sc3 = sc2.with_observer_stationary_relative_to("icrs")
# Velocity difference is now pure radial velocity of target
assert_quantity_allclose(sc3.radial_velocity, 2 * u.km / u.s)
# Check setting the velocity in with_observer_stationary_relative_to
sc4 = sc2.with_observer_stationary_relative_to(
"icrs", velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
# Observer once again moving away from target but faster
assert_quantity_allclose(sc4.radial_velocity, 4 * u.km / u.s)
# Check that we can also pass frame classes instead of names
sc5 = sc2.with_observer_stationary_relative_to(
ICRS, velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
assert_quantity_allclose(sc5.radial_velocity, 4 * u.km / u.s)
# And make sure we can also pass instances of classes without data
sc6 = sc2.with_observer_stationary_relative_to(
ICRS(), velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
assert_quantity_allclose(sc6.radial_velocity, 4 * u.km / u.s)
# And with data provided no velocities are present
sc7 = sc2.with_observer_stationary_relative_to(
ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type="cartesian"),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
assert_quantity_allclose(sc7.radial_velocity, 4 * u.km / u.s)
# And also have the ability to pass frames with velocities already defined
sc8 = sc2.with_observer_stationary_relative_to(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
)
assert_quantity_allclose(
sc8.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# Make sure that things work properly if passing a SkyCoord
sc9 = sc2.with_observer_stationary_relative_to(
SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type="cartesian")),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
assert_quantity_allclose(sc9.radial_velocity, 4 * u.km / u.s)
sc10 = sc2.with_observer_stationary_relative_to(
SkyCoord(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
)
)
assert_quantity_allclose(
sc10.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# But we shouldn't be able to pass both a frame with velocities, and explicit velocities
with pytest.raises(
ValueError,
match="frame already has differentials, cannot also specify velocity",
):
sc2.with_observer_stationary_relative_to(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
# And velocities should have three elements
with pytest.raises(
ValueError, match="velocity should be a Quantity vector with 3 elements"
):
sc2.with_observer_stationary_relative_to(
ICRS, velocity=[-(2**0.5), 0, -(2**0.5), -3] * u.km / u.s
)
# Make sure things don't change depending on what frame class is used for reference
sc11 = sc2.with_observer_stationary_relative_to(
SkyCoord(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
).transform_to(Galactic)
)
assert_quantity_allclose(
sc11.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# Check that it is possible to preserve the observer frame
sc12 = sc2.with_observer_stationary_relative_to(LSRD)
sc13 = sc2.with_observer_stationary_relative_to(LSRD, preserve_observer_frame=True)
assert isinstance(sc12.observer, Galactic)
assert isinstance(sc13.observer, ICRS)
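# Assumed module-level helper: the original file defines a GCRS frame offset
# from the origin for tests that only need a position. The exact offset used
# here is an assumption; only the no-velocity warning path matters below.
gcrs_not_origin = GCRS(CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]))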
def test_los_shift_radial_velocity():
# Tests to make sure that with_radial_velocity_shift correctly calculates
# the new radial velocity
# First check case where observer and/or target aren't specified
sc1 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s)
sc2 = sc1.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc2.radial_velocity, 2 * u.km / u.s)
sc3 = sc1.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc3.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc4 = SpectralCoord(
500 * u.nm, radial_velocity=1 * u.km / u.s, observer=gcrs_not_origin
)
sc5 = sc4.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc5.radial_velocity, 2 * u.km / u.s)
sc6 = sc4.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc6.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc7 = SpectralCoord(
500 * u.nm,
radial_velocity=1 * u.km / u.s,
target=ICRS(10 * u.deg, 20 * u.deg),
)
sc8 = sc7.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc8.radial_velocity, 2 * u.km / u.s)
sc9 = sc7.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc9.radial_velocity, -2 * u.km / u.s)
# Check that things still work when both observer and target are specified
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc10 = SpectralCoord(
500 * u.nm,
observer=ICRS(0 * u.deg, 0 * u.deg, distance=1 * u.m),
target=ICRS(
10 * u.deg,
20 * u.deg,
radial_velocity=1 * u.km / u.s,
distance=10 * u.kpc,
),
)
sc11 = sc10.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc11.radial_velocity, 2 * u.km / u.s)
sc12 = sc10.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc12.radial_velocity, -2 * u.km / u.s)
# Check that things work if radial_velocity wasn't specified at all
sc13 = SpectralCoord(500 * u.nm)
sc14 = sc13.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc14.radial_velocity, 1 * u.km / u.s)
sc15 = sc1.with_radial_velocity_shift()
assert_quantity_allclose(sc15.radial_velocity, 1 * u.km / u.s)
# Check that units are verified
with pytest.raises(
u.UnitsError,
match=(
"Argument must have unit physical type 'speed' for radial velocty or "
"'dimensionless' for redshift."
),
):
sc1.with_radial_velocity_shift(target_shift=1 * u.kg)
@pytest.mark.xfail
def test_relativistic_radial_velocity():
# Test for when both observer and target have relativistic velocities.
# This is not yet supported, so the test is xfailed for now.
sc = SpectralCoord(
500 * u.nm,
observer=ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
-0.5 * c,
-0.5 * c,
-0.5 * c,
representation_type="cartesian",
differential_type="cartesian",
),
target=ICRS(
1 * u.kpc,
1 * u.kpc,
1 * u.kpc,
0.5 * c,
0.5 * c,
0.5 * c,
representation_type="cartesian",
differential_type="cartesian",
),
)
assert_quantity_allclose(sc.radial_velocity, 0.989743318610787 * u.km / u.s)
# SCIENCE USE CASE TESTS
def test_spectral_coord_jupiter(greenwich_earthlocation):
"""
Checks radial velocity between Earth and Jupiter
"""
obstime = time.Time("2018-12-13 9:00")
obs = greenwich_earthlocation.get_gcrs(obstime)
pos, vel = get_body_barycentric_posvel("jupiter", obstime)
jupiter = SkyCoord(
pos.with_differentials(CartesianDifferential(vel.xyz)), obstime=obstime
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=jupiter)
# The velocity should be less than ~43 km/s + a bit extra, which is the
# maximum possible earth-jupiter relative velocity. We check the exact
# value here (determined from SpectralCoord, so this serves as a test to
# check that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -7.35219854 * u.km / u.s)
def test_spectral_coord_alphacen(greenwich_earthlocation):
"""
Checks radial velocity between Earth and Alpha Centauri
"""
obstime = time.Time("2018-12-13 9:00")
obs = greenwich_earthlocation.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# acen = SkyCoord.from_name('alpha cen')
acen = SkyCoord(
ra=219.90085 * u.deg,
dec=-60.83562 * u.deg,
frame="icrs",
distance=4.37 * u.lightyear,
radial_velocity=-18.0 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=acen)
# The velocity should be less than ~18 + 30 + a bit extra, which is the
# maximum possible relative velocity. We check the exact value here
# (determined from SpectralCoord, so this serves as a test to check that
# this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -26.328301 * u.km / u.s)
def test_spectral_coord_m31(greenwich_earthlocation):
"""
Checks radial velocity between Earth and M31
"""
obstime = time.Time("2018-12-13 9:00")
obs = greenwich_earthlocation.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# m31 = SkyCoord.from_name('M31')
m31 = SkyCoord(
ra=10.6847 * u.deg,
dec=41.269 * u.deg,
distance=710 * u.kpc,
radial_velocity=-300 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=m31)
# The velocity should be less than ~300 + 30 + a bit extra in km/s, which
# is the maximum possible relative velocity. We check the exact values
# here (determined from SpectralCoord, so this serves as a test to check
# that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -279.755128 * u.km / u.s)
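    # As a sanity check, the redshift is consistent, to first order in v/c,
    # with z ~ v/c: -279.755128 / 299792.458 ~ -9.33e-4.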
assert_allclose(spc.redshift, -0.0009327276702120191)
def test_shift_to_rest_galaxy():
"""
This tests storing a spectral coordinate with a specific redshift, and then
doing basic rest-to-observed-and-back transformations
"""
z = 5
rest_line_wls = [5007, 6563] * u.AA
observed_spc = SpectralCoord(rest_line_wls * (z + 1), redshift=z)
rest_spc = observed_spc.to_rest()
# alternatively:
    # rest_spc = observed_spc.with_observer(observed_spc.target)
# although then it would have to be clearly documented, or the `to_rest`
# implemented in Spectrum1D?
assert_quantity_allclose(rest_spc, rest_line_wls)
# No frames are explicitly defined, so to the user, the observer and
# target are not set.
with pytest.raises(AttributeError):
assert_frame_allclose(rest_spc.observer, rest_spc.target)
def test_shift_to_rest_star_withobserver(greenwich_earthlocation):
rv = -8.3283011 * u.km / u.s
rest_line_wls = [5007, 6563] * u.AA
obstime = time.Time("2018-12-13 9:00")
eloc = greenwich_earthlocation
obs = eloc.get_gcrs(obstime)
acen = SkyCoord(
ra=219.90085 * u.deg,
dec=-60.83562 * u.deg,
frame="icrs",
distance=4.37 * u.lightyear,
)
# Note that above the rv is missing from the SkyCoord.
# That's intended, as it will instead be set in the `SpectralCoord`. But
    # the SpectralCoord machinery should yield something comparable to
    # test_spectral_coord_alphacen
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
observed_spc = SpectralCoord(
rest_line_wls * (rv / c + 1), observer=obs, target=acen
)
rest_spc = observed_spc.to_rest()
assert_quantity_allclose(rest_spc, rest_line_wls)
barycentric_spc = observed_spc.with_observer_stationary_relative_to("icrs")
baryrest_spc = barycentric_spc.to_rest()
assert quantity_allclose(baryrest_spc, rest_line_wls)
# now make sure the change the barycentric shift did is comparable to the
# offset rv_correction produces
# barytarg = SkyCoord(barycentric_spc.target.frame) # should be this but that doesn't work for unclear reasons
barytarg = SkyCoord(
barycentric_spc.target.data.without_differentials(),
frame=barycentric_spc.target.realize_frame(None),
)
vcorr = barytarg.radial_velocity_correction(
kind="barycentric", obstime=obstime, location=eloc
)
drv = baryrest_spc.radial_velocity - observed_spc.radial_velocity
# note this probably will not work on the first try, but it's ok if this is
# "good enough", where good enough is estimated below. But that could be
# adjusted if we think that's too aggressive of a precision target for what
# the machinery can handle
# with pytest.raises(AssertionError):
assert_quantity_allclose(vcorr, drv, atol=10 * u.m / u.s)
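# Module-level GCRS positions used as generic observer/target stand-ins.
# (They are also referenced from tests defined earlier in the file; since
# Python resolves module-level names at call time, defining them here is fine.)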
gcrs_origin = GCRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
gcrs_not_origin = GCRS(CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]))
@pytest.mark.parametrize(
"sc_kwargs",
[
dict(radial_velocity=0 * u.km / u.s),
dict(observer=gcrs_origin, radial_velocity=0 * u.km / u.s),
dict(target=gcrs_origin, radial_velocity=0 * u.km / u.s),
dict(observer=gcrs_origin, target=gcrs_not_origin),
],
)
def test_los_shift(sc_kwargs):
wl = [4000, 5000] * u.AA
with nullcontext() if "observer" not in sc_kwargs and "target" not in sc_kwargs else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
sc_init = SpectralCoord(wl, **sc_kwargs)
# these should always work in *all* cases because it's unambiguous that
# a target shift should behave this way
new_sc1 = sc_init.with_radial_velocity_shift(0.1)
assert_quantity_allclose(new_sc1, wl * 1.1)
    # a dimensionless shift is interpreted as a redshift
new_sc2 = sc_init.with_radial_velocity_shift(0.1 * u.dimensionless_unscaled)
assert_quantity_allclose(new_sc1, new_sc2)
new_sc3 = sc_init.with_radial_velocity_shift(-100 * u.km / u.s)
assert_quantity_allclose(new_sc3, wl * (1 + (-100 * u.km / u.s / c)))
# now try the cases where observer is specified as well/instead
if sc_init.observer is None or sc_init.target is None:
with pytest.raises(ValueError):
# both must be specified if you're going to mess with observer
sc_init.with_radial_velocity_shift(observer_shift=0.1)
if sc_init.observer is not None and sc_init.target is not None:
        # redshifting the observer should *blueshift* the LOS velocity, since
        # it's the observer-to-target vector that matters
new_sc4 = sc_init.with_radial_velocity_shift(observer_shift=0.1)
assert_quantity_allclose(new_sc4, wl / 1.1)
# an equal shift in both should produce no offset at all
new_sc5 = sc_init.with_radial_velocity_shift(
target_shift=0.1, observer_shift=0.1
)
assert_quantity_allclose(new_sc5, wl)
def test_asteroid_velocity_frame_shifts():
"""
This test mocks up the use case of observing a spectrum of an asteroid
at different times and from different observer locations.
"""
time1 = time.Time("2018-12-13 9:00")
dt = 12 * u.hour
time2 = time1 + dt
    # make the silly but simplifying assumption that the asteroid is moving
    # along the x-axis of GCRS and makes a 10-Earth-radius closest approach
v_ast = [5, 0, 0] * u.km / u.s
x1 = -v_ast[0] * dt / 2
x2 = v_ast[0] * dt / 2
z = 10 * u.Rearth
cdiff = CartesianDifferential(v_ast)
asteroid_loc1 = GCRS(
CartesianRepresentation(x1.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time1,
)
asteroid_loc2 = GCRS(
CartesianRepresentation(x2.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time2,
)
# assume satellites that are essentially fixed in geostationary orbit on
# opposite sides of the earth
observer1 = GCRS(
CartesianRepresentation([0 * u.km, 35000 * u.km, 0 * u.km]), obstime=time1
)
observer2 = GCRS(
CartesianRepresentation([0 * u.km, -35000 * u.km, 0 * u.km]), obstime=time2
)
wls = np.linspace(4000, 7000, 100) * u.AA
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord1 = SpectralCoord(wls, observer=observer1, target=asteroid_loc1)
assert spec_coord1.radial_velocity < 0 * u.km / u.s
assert spec_coord1.radial_velocity > -5 * u.km / u.s
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord2 = SpectralCoord(wls, observer=observer2, target=asteroid_loc2)
assert spec_coord2.radial_velocity > 0 * u.km / u.s
assert spec_coord2.radial_velocity < 5 * u.km / u.s
    # now check the behavior of with_observer_stationary_relative_to: we shift
    # each coord into the velocity frame of its *own* target. That would then
    # be a SpectralCoord that would allow direct physical comparison of the
    # two different spec_coords. There's no way to test that without actual
    # data, though.
# spec_coord2 is redshifted, so we test that it behaves the way "shifting
# to rest frame" should - the as-observed spectral coordinate should become
# the rest frame, so something that starts out red should become bluer
target_sc2 = spec_coord2.with_observer_stationary_relative_to(spec_coord2.target)
assert np.all(target_sc2 < spec_coord2)
# rv/redshift should be 0 since the observer and target velocities should
# be the same
assert_quantity_allclose(
target_sc2.radial_velocity, 0 * u.km / u.s, atol=1e-7 * u.km / u.s
)
# check that the same holds for spec_coord1, but be more specific: it
# should follow the standard redshift formula (which in this case yields
# a blueshift, although the formula is the same as 1+z)
target_sc1 = spec_coord1.with_observer_stationary_relative_to(spec_coord1.target)
assert_quantity_allclose(target_sc1, spec_coord1 / (1 + spec_coord1.redshift))
# TODO: Figure out what is meant by the below use case
# ensure the "target-rest" use gives the same answer
# target_sc1_alt = spec_coord1.with_observer_stationary_relative_to('target-rest')
# assert_quantity_allclose(target_sc1, target_sc1_alt)
def test_spectral_coord_from_sky_coord_without_distance():
# see https://github.com/astropy/specutils/issues/658 for issue context
obs = SkyCoord(0 * u.m, 0 * u.m, 0 * u.m, representation_type="cartesian")
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
coord = SpectralCoord([1, 2, 3] * u.micron, observer=obs)
    # coord.target = SkyCoord.from_name('m31')  # <- original issue; the line below is equivalent but requires no remote data access
with pytest.warns(
AstropyUserWarning, match="Distance on coordinate object is dimensionless"
):
coord.target = SkyCoord(ra=10.68470833 * u.deg, dec=41.26875 * u.deg)
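# Map the Starlink rv ``specsys`` names used in the reference table to the
# astropy velocity frames exercised below; the last two reuse the FITS-WCS
# conventions for the galactocentric and Local Group frames.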
EXPECTED_VELOCITY_FRAMES = {
"geocent": "gcrs",
"heliocent": "hcrs",
"lsrk": "lsrk",
"lsrd": "lsrd",
"galactoc": FITSWCS_VELOCITY_FRAMES["GALACTOC"],
"localgrp": FITSWCS_VELOCITY_FRAMES["LOCALGRP"],
}
@pytest.mark.parametrize("specsys", list(EXPECTED_VELOCITY_FRAMES))
@pytest.mark.slow
def test_spectralcoord_accuracy(specsys):
# This is a test to check the numerical results of transformations between
# different velocity frames in SpectralCoord. This compares the velocity
# shifts determined with SpectralCoord to those determined from the rv
# package in Starlink.
velocity_frame = EXPECTED_VELOCITY_FRAMES[specsys]
reference_filename = get_pkg_data_filename("accuracy/data/rv.ecsv")
reference_table = Table.read(reference_filename, format="ascii.ecsv")
rest = 550 * u.nm
with iers.conf.set_temp("auto_download", False):
for row in reference_table:
observer = EarthLocation.from_geodetic(
-row["obslon"], row["obslat"]
).get_itrs(obstime=row["obstime"])
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_topo = SpectralCoord(
545 * u.nm, observer=observer, target=row["target"]
)
# FIXME: A warning is emitted for dates after MJD=57754.0 even
# though the leap second table should be valid until the end of
# 2020.
with nullcontext() if row["obstime"].mjd < 57754 else pytest.warns(
AstropyWarning, match="Tried to get polar motions"
):
sc_final = sc_topo.with_observer_stationary_relative_to(velocity_frame)
delta_vel = sc_topo.to(
u.km / u.s, doppler_convention="relativistic", doppler_rest=rest
) - sc_final.to(
u.km / u.s, doppler_convention="relativistic", doppler_rest=rest
)
if specsys == "galactoc":
assert_allclose(
delta_vel.to_value(u.km / u.s), row[specsys.lower()], atol=30
)
else:
assert_allclose(
delta_vel.to_value(u.km / u.s),
row[specsys.lower()],
atol=0.02,
rtol=0.002,
)
# TODO: add test when target is not ICRS
# TODO: add test when SpectralCoord is in velocity to start with
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianDifferential,
CartesianRepresentation,
CylindricalDifferential,
CylindricalRepresentation,
Latitude,
Longitude,
PhysicsSphericalDifferential,
PhysicsSphericalRepresentation,
RadialDifferential,
RadialRepresentation,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates.representation import DIFFERENTIAL_CLASSES
from astropy.tests.helper import assert_quantity_allclose, quantity_allclose
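# Helpers: compare representations via their Cartesian xyz values (so
# instances of different representation classes can be checked against each
# other) and compare differentials component by component.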
def assert_representation_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs):
actual_xyz = actual.to_cartesian().get_xyz(xyz_axis=-1)
desired_xyz = desired.to_cartesian().get_xyz(xyz_axis=-1)
actual_xyz, desired_xyz = np.broadcast_arrays(actual_xyz, desired_xyz, subok=True)
assert_quantity_allclose(actual_xyz, desired_xyz, rtol, atol, **kwargs)
def assert_differential_allclose(actual, desired, rtol=1.0e-7, **kwargs):
assert actual.components == desired.components
for component in actual.components:
actual_c = getattr(actual, component)
atol = 1.0e-10 * actual_c.unit
assert_quantity_allclose(
actual_c, getattr(desired, component), rtol, atol, **kwargs
)
def representation_equal(first, second):
    # Use a list (not a generator) so that np.all actually reduces over the
    # per-component comparisons instead of truth-testing a generator object,
    # which would always be True.
    return np.all(
        [getattr(first, comp) == getattr(second, comp) for comp in first.components]
    )
class TestArithmetic:
def setup_method(self):
        # Choose some specific coordinates, for which ``sum`` and ``dot``
        # work out nicely.
self.lon = Longitude(np.arange(0, 12.1, 2), u.hourangle)
self.lat = Latitude(np.arange(-90, 91, 30), u.deg)
self.distance = [5.0, 12.0, 4.0, 2.0, 4.0, 12.0, 5.0] * u.kpc
self.spherical = SphericalRepresentation(self.lon, self.lat, self.distance)
self.unit_spherical = self.spherical.represent_as(UnitSphericalRepresentation)
self.cartesian = self.spherical.to_cartesian()
def test_norm_spherical(self):
norm_s = self.spherical.norm()
assert isinstance(norm_s, u.Quantity)
# Just to be sure, test against getting object arrays.
assert norm_s.dtype.kind == "f"
assert np.all(norm_s == self.distance)
@pytest.mark.parametrize(
"representation",
(
PhysicsSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation,
),
)
def test_norm(self, representation):
in_rep = self.spherical.represent_as(representation)
norm_rep = in_rep.norm()
assert isinstance(norm_rep, u.Quantity)
assert_quantity_allclose(norm_rep, self.distance)
def test_norm_unitspherical(self):
norm_rep = self.unit_spherical.norm()
assert norm_rep.unit == u.dimensionless_unscaled
assert np.all(norm_rep == 1.0 * u.dimensionless_unscaled)
@pytest.mark.parametrize(
"representation",
(
SphericalRepresentation,
PhysicsSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation,
UnitSphericalRepresentation,
),
)
def test_neg_pos(self, representation):
in_rep = self.cartesian.represent_as(representation)
pos_rep = +in_rep
assert type(pos_rep) is type(in_rep)
assert pos_rep is not in_rep
assert np.all(representation_equal(pos_rep, in_rep))
neg_rep = -in_rep
assert type(neg_rep) is type(in_rep)
assert np.all(neg_rep.norm() == in_rep.norm())
in_rep_xyz = in_rep.to_cartesian().xyz
assert_quantity_allclose(
neg_rep.to_cartesian().xyz, -in_rep_xyz, atol=1.0e-10 * in_rep_xyz.unit
)
def test_mul_div_spherical(self):
s0 = self.spherical / (1.0 * u.Myr)
assert isinstance(s0, SphericalRepresentation)
assert s0.distance.dtype.kind == "f"
assert np.all(s0.lon == self.spherical.lon)
assert np.all(s0.lat == self.spherical.lat)
assert np.all(s0.distance == self.distance / (1.0 * u.Myr))
s1 = (1.0 / u.Myr) * self.spherical
assert isinstance(s1, SphericalRepresentation)
assert np.all(representation_equal(s1, s0))
s2 = self.spherical * np.array([[1.0], [2.0]])
assert isinstance(s2, SphericalRepresentation)
assert s2.shape == (2, self.spherical.shape[0])
assert np.all(s2.lon == self.spherical.lon)
assert np.all(s2.lat == self.spherical.lat)
assert np.all(s2.distance == self.spherical.distance * np.array([[1.0], [2.0]]))
s3 = np.array([[1.0], [2.0]]) * self.spherical
assert isinstance(s3, SphericalRepresentation)
assert np.all(representation_equal(s3, s2))
s4 = -self.spherical
assert isinstance(s4, SphericalRepresentation)
assert quantity_allclose(
s4.to_cartesian().xyz,
-self.spherical.to_cartesian().xyz,
atol=1e-15 * self.spherical.distance.unit,
)
assert np.all(s4.distance == self.spherical.distance)
s5 = +self.spherical
assert s5 is not self.spherical
assert np.all(representation_equal(s5, self.spherical))
@pytest.mark.parametrize(
"representation",
(
PhysicsSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation,
),
)
def test_mul_div(self, representation):
in_rep = self.spherical.represent_as(representation)
r1 = in_rep / (1.0 * u.Myr)
assert isinstance(r1, representation)
for component in in_rep.components:
in_rep_comp = getattr(in_rep, component)
r1_comp = getattr(r1, component)
if in_rep_comp.unit == self.distance.unit:
assert np.all(r1_comp == in_rep_comp / (1.0 * u.Myr))
else:
assert np.all(r1_comp == in_rep_comp)
r2 = np.array([[1.0], [2.0]]) * in_rep
assert isinstance(r2, representation)
assert r2.shape == (2, in_rep.shape[0])
assert_quantity_allclose(r2.norm(), self.distance * np.array([[1.0], [2.0]]))
r3 = -in_rep
assert_representation_allclose(
r3.to_cartesian(), (in_rep * -1.0).to_cartesian(), atol=1e-5 * u.pc
)
with pytest.raises(TypeError):
in_rep * in_rep
with pytest.raises(TypeError):
dict() * in_rep
def test_mul_div_unit_spherical(self):
s1 = self.unit_spherical * self.distance
assert isinstance(s1, SphericalRepresentation)
assert np.all(s1.lon == self.unit_spherical.lon)
assert np.all(s1.lat == self.unit_spherical.lat)
assert np.all(s1.distance == self.spherical.distance)
s2 = self.unit_spherical / u.s
assert isinstance(s2, SphericalRepresentation)
assert np.all(s2.lon == self.unit_spherical.lon)
assert np.all(s2.lat == self.unit_spherical.lat)
assert np.all(s2.distance == 1.0 / u.s)
u3 = -self.unit_spherical
assert isinstance(u3, UnitSphericalRepresentation)
assert_quantity_allclose(u3.lon, self.unit_spherical.lon + 180.0 * u.deg)
assert np.all(u3.lat == -self.unit_spherical.lat)
assert_quantity_allclose(
u3.to_cartesian().xyz,
-self.unit_spherical.to_cartesian().xyz,
atol=1.0e-10 * u.dimensionless_unscaled,
)
u4 = +self.unit_spherical
assert isinstance(u4, UnitSphericalRepresentation)
assert u4 is not self.unit_spherical
assert np.all(representation_equal(u4, self.unit_spherical))
def test_add_sub_cartesian(self):
c1 = self.cartesian + self.cartesian
assert isinstance(c1, CartesianRepresentation)
assert c1.x.dtype.kind == "f"
assert np.all(representation_equal(c1, 2.0 * self.cartesian))
with pytest.raises(TypeError):
self.cartesian + 10.0 * u.m
with pytest.raises(u.UnitsError):
self.cartesian + (self.cartesian / u.s)
c2 = self.cartesian - self.cartesian
assert isinstance(c2, CartesianRepresentation)
assert np.all(
representation_equal(
c2, CartesianRepresentation(0.0 * u.m, 0.0 * u.m, 0.0 * u.m)
)
)
c3 = self.cartesian - self.cartesian / 2.0
assert isinstance(c3, CartesianRepresentation)
assert np.all(representation_equal(c3, self.cartesian / 2.0))
@pytest.mark.parametrize(
"representation",
(
PhysicsSphericalRepresentation,
SphericalRepresentation,
CylindricalRepresentation,
),
)
def test_add_sub(self, representation):
in_rep = self.cartesian.represent_as(representation)
r1 = in_rep + in_rep
assert isinstance(r1, representation)
expected = 2.0 * in_rep
for component in in_rep.components:
assert_quantity_allclose(
getattr(r1, component), getattr(expected, component)
)
with pytest.raises(TypeError):
10.0 * u.m + in_rep
with pytest.raises(u.UnitsError):
in_rep + (in_rep / u.s)
r2 = in_rep - in_rep
assert isinstance(r2, representation)
assert_representation_allclose(
r2.to_cartesian(),
CartesianRepresentation(0.0 * u.m, 0.0 * u.m, 0.0 * u.m),
atol=1e-15 * u.kpc,
)
r3 = in_rep - in_rep / 2.0
assert isinstance(r3, representation)
expected = in_rep / 2.0
assert_representation_allclose(r3, expected)
def test_add_sub_unit_spherical(self):
s1 = self.unit_spherical + self.unit_spherical
assert isinstance(s1, SphericalRepresentation)
expected = 2.0 * self.unit_spherical
for component in s1.components:
assert_quantity_allclose(
getattr(s1, component), getattr(expected, component)
)
with pytest.raises(TypeError):
10.0 * u.m - self.unit_spherical
with pytest.raises(u.UnitsError):
self.unit_spherical + (self.unit_spherical / u.s)
s2 = self.unit_spherical - self.unit_spherical / 2.0
assert isinstance(s2, SphericalRepresentation)
expected = self.unit_spherical / 2.0
for component in s2.components:
assert_quantity_allclose(
getattr(s2, component), getattr(expected, component)
)
@pytest.mark.parametrize(
"representation",
(
CartesianRepresentation,
PhysicsSphericalRepresentation,
SphericalRepresentation,
CylindricalRepresentation,
),
)
def test_sum_mean(self, representation):
in_rep = self.spherical.represent_as(representation)
r_sum = in_rep.sum()
assert isinstance(r_sum, representation)
expected = SphericalRepresentation(
90.0 * u.deg, 0.0 * u.deg, 14.0 * u.kpc
).represent_as(representation)
for component in expected.components:
exp_component = getattr(expected, component)
assert_quantity_allclose(
getattr(r_sum, component),
exp_component,
atol=1e-10 * exp_component.unit,
)
r_mean = in_rep.mean()
assert isinstance(r_mean, representation)
expected = expected / len(in_rep)
for component in expected.components:
exp_component = getattr(expected, component)
assert_quantity_allclose(
getattr(r_mean, component),
exp_component,
atol=1e-10 * exp_component.unit,
)
def test_sum_mean_unit_spherical(self):
s_sum = self.unit_spherical.sum()
assert isinstance(s_sum, SphericalRepresentation)
expected = SphericalRepresentation(
90.0 * u.deg, 0.0 * u.deg, 3.0 * u.dimensionless_unscaled
)
for component in expected.components:
exp_component = getattr(expected, component)
assert_quantity_allclose(
getattr(s_sum, component),
exp_component,
atol=1e-10 * exp_component.unit,
)
s_mean = self.unit_spherical.mean()
assert isinstance(s_mean, SphericalRepresentation)
expected = expected / len(self.unit_spherical)
for component in expected.components:
exp_component = getattr(expected, component)
assert_quantity_allclose(
getattr(s_mean, component),
exp_component,
atol=1e-10 * exp_component.unit,
)
@pytest.mark.parametrize(
"representation",
(
CartesianRepresentation,
PhysicsSphericalRepresentation,
SphericalRepresentation,
CylindricalRepresentation,
),
)
def test_dot(self, representation):
in_rep = self.cartesian.represent_as(representation)
r_dot_r = in_rep.dot(in_rep)
assert isinstance(r_dot_r, u.Quantity)
assert r_dot_r.shape == in_rep.shape
assert_quantity_allclose(np.sqrt(r_dot_r), self.distance)
r_dot_r_rev = in_rep.dot(in_rep[::-1])
assert isinstance(r_dot_r_rev, u.Quantity)
assert r_dot_r_rev.shape == in_rep.shape
expected = [-25.0, -126.0, 2.0, 4.0, 2.0, -126.0, -25.0] * u.kpc**2
assert_quantity_allclose(r_dot_r_rev, expected)
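        # Dotting with a Cartesian unit vector projects out that component.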
for axis in "xyz":
project = CartesianRepresentation(
*(
(1.0 if axis == _axis else 0.0) * u.dimensionless_unscaled
for _axis in "xyz"
)
)
assert_quantity_allclose(
in_rep.dot(project), getattr(self.cartesian, axis), atol=1.0 * u.upc
)
with pytest.raises(TypeError):
in_rep.dot(self.cartesian.xyz)
def test_dot_unit_spherical(self):
u_dot_u = self.unit_spherical.dot(self.unit_spherical)
assert isinstance(u_dot_u, u.Quantity)
assert u_dot_u.shape == self.unit_spherical.shape
assert_quantity_allclose(u_dot_u, 1.0 * u.dimensionless_unscaled)
cartesian = self.unit_spherical.to_cartesian()
for axis in "xyz":
project = CartesianRepresentation(
*(
(1.0 if axis == _axis else 0.0) * u.dimensionless_unscaled
for _axis in "xyz"
)
)
assert_quantity_allclose(
self.unit_spherical.dot(project), getattr(cartesian, axis), atol=1.0e-10
)
@pytest.mark.parametrize(
"representation",
(
CartesianRepresentation,
PhysicsSphericalRepresentation,
SphericalRepresentation,
CylindricalRepresentation,
),
)
def test_cross(self, representation):
in_rep = self.cartesian.represent_as(representation)
r_cross_r = in_rep.cross(in_rep)
assert isinstance(r_cross_r, representation)
assert_quantity_allclose(
r_cross_r.norm(), 0.0 * u.kpc**2, atol=1.0 * u.mpc**2
)
r_cross_r_rev = in_rep.cross(in_rep[::-1])
sep = angular_separation(self.lon, self.lat, self.lon[::-1], self.lat[::-1])
expected = self.distance * self.distance[::-1] * np.sin(sep)
assert_quantity_allclose(r_cross_r_rev.norm(), expected, atol=1.0 * u.mpc**2)
unit_vectors = CartesianRepresentation(
[1.0, 0.0, 0.0] * u.one, [0.0, 1.0, 0.0] * u.one, [0.0, 0.0, 1.0] * u.one
)[:, np.newaxis]
r_cross_uv = in_rep.cross(unit_vectors)
assert r_cross_uv.shape == (3, 7)
assert_quantity_allclose(
r_cross_uv.dot(unit_vectors), 0.0 * u.kpc, atol=1.0 * u.upc
)
assert_quantity_allclose(
r_cross_uv.dot(in_rep), 0.0 * u.kpc**2, atol=1.0 * u.mpc**2
)
zeros = np.zeros(len(in_rep)) * u.kpc
expected = CartesianRepresentation(
u.Quantity((zeros, -self.cartesian.z, self.cartesian.y)),
u.Quantity((self.cartesian.z, zeros, -self.cartesian.x)),
u.Quantity((-self.cartesian.y, self.cartesian.x, zeros)),
)
# Comparison with spherical is hard since some distances are zero,
# implying the angles are undefined.
r_cross_uv_cartesian = r_cross_uv.to_cartesian()
assert_representation_allclose(r_cross_uv_cartesian, expected, atol=1.0 * u.upc)
# A final check, with the side benefit of ensuring __truediv__ and norm
# work on multi-D representations.
r_cross_uv_by_distance = r_cross_uv / self.distance
uv_sph = unit_vectors.represent_as(UnitSphericalRepresentation)
sep = angular_separation(self.lon, self.lat, uv_sph.lon, uv_sph.lat)
assert_quantity_allclose(r_cross_uv_by_distance.norm(), np.sin(sep), atol=1e-9)
with pytest.raises(TypeError):
in_rep.cross(self.cartesian.xyz)
def test_cross_unit_spherical(self):
u_cross_u = self.unit_spherical.cross(self.unit_spherical)
assert isinstance(u_cross_u, SphericalRepresentation)
assert_quantity_allclose(u_cross_u.norm(), 0.0 * u.one, atol=1.0e-10 * u.one)
u_cross_u_rev = self.unit_spherical.cross(self.unit_spherical[::-1])
assert isinstance(u_cross_u_rev, SphericalRepresentation)
sep = angular_separation(self.lon, self.lat, self.lon[::-1], self.lat[::-1])
expected = np.sin(sep)
assert_quantity_allclose(u_cross_u_rev.norm(), expected, atol=1.0e-10 * u.one)
class TestUnitVectorsAndScales:
@staticmethod
def check_unit_vectors(e):
for v in e.values():
assert type(v) is CartesianRepresentation
assert_quantity_allclose(v.norm(), 1.0 * u.one)
return e
@staticmethod
def check_scale_factors(sf, rep):
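        # Each scale factor times its component's unit must be equivalent to
        # the norm's unit, i.e. the factor converts an offset in that
        # coordinate into a physical length.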
unit = rep.norm().unit
for c, f in sf.items():
assert type(f) is u.Quantity
assert (f.unit * getattr(rep, c).unit).is_equivalent(unit)
def test_spherical(self):
s = SphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle,
lat=[0.0, -30.0, 85.0] * u.deg,
distance=[1, 2, 3] * u.kpc,
)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_lon = s + s.distance * 1e-5 * np.cos(s.lat) * e["lon"]
assert_quantity_allclose(s_lon.lon, s.lon + 1e-5 * u.rad, atol=1e-10 * u.rad)
assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10 * u.rad)
assert_quantity_allclose(s_lon.distance, s.distance)
s_lon2 = s + 1e-5 * u.radian * sf["lon"] * e["lon"]
assert_representation_allclose(s_lon2, s_lon)
s_lat = s + s.distance * 1e-5 * e["lat"]
assert_quantity_allclose(s_lat.lon, s.lon)
assert_quantity_allclose(s_lat.lat, s.lat + 1e-5 * u.rad, atol=1e-10 * u.rad)
assert_quantity_allclose(s_lon.distance, s.distance)
s_lat2 = s + 1.0e-5 * u.radian * sf["lat"] * e["lat"]
assert_representation_allclose(s_lat2, s_lat)
s_distance = s + 1.0 * u.pc * e["distance"]
assert_quantity_allclose(s_distance.lon, s.lon, atol=1e-10 * u.rad)
assert_quantity_allclose(s_distance.lat, s.lat, atol=1e-10 * u.rad)
assert_quantity_allclose(s_distance.distance, s.distance + 1.0 * u.pc)
s_distance2 = s + 1.0 * u.pc * sf["distance"] * e["distance"]
assert_representation_allclose(s_distance2, s_distance)
def test_unit_spherical(self):
s = UnitSphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle, lat=[0.0, -30.0, 85.0] * u.deg
)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_lon = s + 1e-5 * np.cos(s.lat) * e["lon"]
assert_quantity_allclose(s_lon.lon, s.lon + 1e-5 * u.rad, atol=1e-10 * u.rad)
assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10 * u.rad)
s_lon2 = s + 1e-5 * u.radian * sf["lon"] * e["lon"]
assert_representation_allclose(s_lon2, s_lon)
s_lat = s + 1e-5 * e["lat"]
assert_quantity_allclose(s_lat.lon, s.lon)
assert_quantity_allclose(s_lat.lat, s.lat + 1e-5 * u.rad, atol=1e-10 * u.rad)
s_lat2 = s + 1.0e-5 * u.radian * sf["lat"] * e["lat"]
assert_representation_allclose(s_lat2, s_lat)
def test_radial(self):
r = RadialRepresentation(10.0 * u.kpc)
with pytest.raises(NotImplementedError):
r.unit_vectors()
sf = r.scale_factors()
assert np.all(sf["distance"] == 1.0 * u.one)
assert np.all(r.norm() == r.distance)
with pytest.raises(TypeError):
r + r
def test_physical_spherical(self):
s = PhysicsSphericalRepresentation(
phi=[0.0, 6.0, 21.0] * u.hourangle,
theta=[90.0, 120.0, 5.0] * u.deg,
r=[1, 2, 3] * u.kpc,
)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_phi = s + s.r * 1e-5 * np.sin(s.theta) * e["phi"]
assert_quantity_allclose(s_phi.phi, s.phi + 1e-5 * u.rad, atol=1e-10 * u.rad)
assert_quantity_allclose(s_phi.theta, s.theta, atol=1e-10 * u.rad)
assert_quantity_allclose(s_phi.r, s.r)
s_phi2 = s + 1e-5 * u.radian * sf["phi"] * e["phi"]
assert_representation_allclose(s_phi2, s_phi)
s_theta = s + s.r * 1e-5 * e["theta"]
assert_quantity_allclose(s_theta.phi, s.phi)
assert_quantity_allclose(
s_theta.theta, s.theta + 1e-5 * u.rad, atol=1e-10 * u.rad
)
assert_quantity_allclose(s_theta.r, s.r)
s_theta2 = s + 1.0e-5 * u.radian * sf["theta"] * e["theta"]
assert_representation_allclose(s_theta2, s_theta)
s_r = s + 1.0 * u.pc * e["r"]
assert_quantity_allclose(s_r.phi, s.phi, atol=1e-10 * u.rad)
assert_quantity_allclose(s_r.theta, s.theta, atol=1e-10 * u.rad)
assert_quantity_allclose(s_r.r, s.r + 1.0 * u.pc)
s_r2 = s + 1.0 * u.pc * sf["r"] * e["r"]
assert_representation_allclose(s_r2, s_r)
def test_cartesian(self):
s = CartesianRepresentation(
x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.Mpc, z=[3, 4, 5] * u.kpc
)
e = s.unit_vectors()
sf = s.scale_factors()
for v, expected in zip(
e.values(),
([1.0, 0.0, 0.0] * u.one, [0.0, 1.0, 0.0] * u.one, [0.0, 0.0, 1.0] * u.one),
):
assert np.all(v.get_xyz(xyz_axis=-1) == expected)
for f in sf.values():
assert np.all(f == 1.0 * u.one)
def test_cylindrical(self):
s = CylindricalRepresentation(
rho=[1, 2, 3] * u.pc, phi=[0.0, 90.0, -45.0] * u.deg, z=[3, 4, 5] * u.kpc
)
e = s.unit_vectors()
self.check_unit_vectors(e)
sf = s.scale_factors()
self.check_scale_factors(sf, s)
s_rho = s + 1.0 * u.pc * e["rho"]
assert_quantity_allclose(s_rho.rho, s.rho + 1.0 * u.pc)
assert_quantity_allclose(s_rho.phi, s.phi)
assert_quantity_allclose(s_rho.z, s.z)
s_rho2 = s + 1.0 * u.pc * sf["rho"] * e["rho"]
assert_representation_allclose(s_rho2, s_rho)
s_phi = s + s.rho * 1e-5 * e["phi"]
assert_quantity_allclose(s_phi.rho, s.rho)
assert_quantity_allclose(s_phi.phi, s.phi + 1e-5 * u.rad)
assert_quantity_allclose(s_phi.z, s.z)
s_phi2 = s + 1e-5 * u.radian * sf["phi"] * e["phi"]
assert_representation_allclose(s_phi2, s_phi)
s_z = s + 1.0 * u.pc * e["z"]
assert_quantity_allclose(s_z.rho, s.rho)
assert_quantity_allclose(s_z.phi, s.phi, atol=1e-10 * u.rad)
assert_quantity_allclose(s_z.z, s.z + 1.0 * u.pc)
s_z2 = s + 1.0 * u.pc * sf["z"] * e["z"]
assert_representation_allclose(s_z2, s_z)
@pytest.mark.parametrize("omit_coslat", [False, True], scope="class")
class TestSphericalDifferential:
    # These test cases are parametrized over omit_coslat, which selects either
    # SphericalDifferential or SphericalCosLatDifferential, hence some tests
    # depend on omit_coslat.
def _setup(self, omit_coslat):
if omit_coslat:
self.SD_cls = SphericalCosLatDifferential
else:
self.SD_cls = SphericalDifferential
s = SphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle,
lat=[0.0, -30.0, 85.0] * u.deg,
distance=[1, 2, 3] * u.kpc,
)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors(omit_coslat=omit_coslat)
def test_name_coslat(self, omit_coslat):
self._setup(omit_coslat)
if omit_coslat:
assert self.SD_cls is SphericalCosLatDifferential
assert self.SD_cls.get_name() == "sphericalcoslat"
else:
assert self.SD_cls is SphericalDifferential
assert self.SD_cls.get_name() == "spherical"
assert self.SD_cls.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self, omit_coslat):
self._setup(omit_coslat)
s, e, sf = self.s, self.e, self.sf
o_lon = self.SD_cls(1.0 * u.arcsec, 0.0 * u.arcsec, 0.0 * u.kpc)
o_lonc = o_lon.to_cartesian(base=s)
o_lon2 = self.SD_cls.from_cartesian(o_lonc, base=s)
assert_differential_allclose(o_lon, o_lon2)
# simple check by hand for first element.
# lat[0] is 0, so cos(lat) term doesn't matter.
assert_quantity_allclose(
o_lonc[0].xyz, [0.0, np.pi / 180.0 / 3600.0, 0.0] * u.kpc
)
# check all using unit vectors and scale factors.
s_lon = s + 1.0 * u.arcsec * sf["lon"] * e["lon"]
assert_representation_allclose(o_lonc, s_lon - s, atol=1 * u.npc)
s_lon2 = s + o_lon
assert_representation_allclose(s_lon2, s_lon, atol=1 * u.npc)
o_lat = self.SD_cls(0.0 * u.arcsec, 1.0 * u.arcsec, 0.0 * u.kpc)
o_latc = o_lat.to_cartesian(base=s)
assert_quantity_allclose(
o_latc[0].xyz, [0.0, 0.0, np.pi / 180.0 / 3600.0] * u.kpc, atol=1.0 * u.npc
)
s_lat = s + 1.0 * u.arcsec * sf["lat"] * e["lat"]
assert_representation_allclose(o_latc, s_lat - s, atol=1 * u.npc)
s_lat2 = s + o_lat
assert_representation_allclose(s_lat2, s_lat, atol=1 * u.npc)
o_distance = self.SD_cls(0.0 * u.arcsec, 0.0 * u.arcsec, 1.0 * u.mpc)
o_distancec = o_distance.to_cartesian(base=s)
assert_quantity_allclose(
o_distancec[0].xyz, [1e-6, 0.0, 0.0] * u.kpc, atol=1.0 * u.npc
)
s_distance = s + 1.0 * u.mpc * sf["distance"] * e["distance"]
assert_representation_allclose(o_distancec, s_distance - s, atol=1 * u.npc)
s_distance2 = s + o_distance
assert_representation_allclose(s_distance2, s_distance)
def test_differential_arithmetic(self, omit_coslat):
self._setup(omit_coslat)
s = self.s
o_lon = self.SD_cls(1.0 * u.arcsec, 0.0 * u.arcsec, 0.0 * u.kpc)
o_lon_by_2 = o_lon / 2.0
assert_representation_allclose(
o_lon_by_2.to_cartesian(s) * 2.0, o_lon.to_cartesian(s), atol=1e-10 * u.kpc
)
assert_representation_allclose(
s + o_lon, s + 2 * o_lon_by_2, atol=1e-10 * u.kpc
)
o_lon_rec = o_lon_by_2 + o_lon_by_2
assert_representation_allclose(s + o_lon, s + o_lon_rec, atol=1e-10 * u.kpc)
o_lon_0 = o_lon - o_lon
for c in o_lon_0.components:
assert np.all(getattr(o_lon_0, c) == 0.0)
o_lon2 = self.SD_cls(1 * u.mas / u.yr, 0 * u.mas / u.yr, 0 * u.km / u.s)
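        # A proper motion of 1 mas/yr at the first distance of 1 kpc
        # corresponds to ~4.74 km/s.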
assert_quantity_allclose(
o_lon2.norm(s)[0], 4.74 * u.km / u.s, atol=0.01 * u.km / u.s
)
assert_representation_allclose(
o_lon2.to_cartesian(s) * 1000.0 * u.yr,
o_lon.to_cartesian(s),
atol=1e-10 * u.kpc,
)
s_off = s + o_lon
s_off2 = s + o_lon2 * 1000.0 * u.yr
assert_representation_allclose(s_off, s_off2, atol=1e-10 * u.kpc)
factor = 1e5 * u.radian / u.arcsec
if not omit_coslat:
factor = factor / np.cos(s.lat)
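        # The huge tangential offset dominates the original position vector,
        # so the result lands ~90 deg away in longitude, near lat=0, at about
        # 1e5 times the distance. The plain (non-CosLat) d_lon needs the
        # 1/cos(lat) factor because its Cartesian scale factor carries cos(lat).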
s_off_big = s + o_lon * factor
assert_representation_allclose(
s_off_big,
SphericalRepresentation(
s.lon + 90.0 * u.deg, 0.0 * u.deg, 1e5 * s.distance
),
atol=5.0 * u.kpc,
)
o_lon3c = CartesianRepresentation(0.0, 4.74047, 0.0, unit=u.km / u.s)
o_lon3 = self.SD_cls.from_cartesian(o_lon3c, base=s)
expected0 = self.SD_cls(
1.0 * u.mas / u.yr, 0.0 * u.mas / u.yr, 0.0 * u.km / u.s
)
assert_differential_allclose(o_lon3[0], expected0)
s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian / u.mas
assert_representation_allclose(
s_off_big2,
SphericalRepresentation(90.0 * u.deg, 0.0 * u.deg, 1e5 * u.kpc),
atol=5.0 * u.kpc,
)
with pytest.raises(TypeError):
o_lon - s
with pytest.raises(TypeError):
s.to_cartesian() + o_lon
def test_differential_init_errors(self, omit_coslat):
self._setup(omit_coslat)
s = self.s
with pytest.raises(u.UnitsError):
self.SD_cls(1.0 * u.arcsec, 0.0, 0.0)
with pytest.raises(TypeError):
self.SD_cls(1.0 * u.arcsec, 0.0 * u.arcsec, 0.0 * u.kpc, False, False)
with pytest.raises(TypeError):
self.SD_cls(
1.0 * u.arcsec,
0.0 * u.arcsec,
0.0 * u.kpc,
copy=False,
d_lat=0.0 * u.arcsec,
)
with pytest.raises(TypeError):
self.SD_cls(
1.0 * u.arcsec, 0.0 * u.arcsec, 0.0 * u.kpc, copy=False, flying="circus"
)
with pytest.raises(ValueError):
self.SD_cls(
np.ones(2) * u.arcsec, np.zeros(3) * u.arcsec, np.zeros(2) * u.kpc
)
with pytest.raises(u.UnitsError):
self.SD_cls(1.0 * u.arcsec, 1.0 * u.s, 0.0 * u.kpc)
with pytest.raises(u.UnitsError):
self.SD_cls(1.0 * u.kpc, 1.0 * u.arcsec, 0.0 * u.kpc)
o = self.SD_cls(1.0 * u.arcsec, 1.0 * u.arcsec, 0.0 * u.km / u.s)
with pytest.raises(u.UnitsError):
o.to_cartesian(s)
with pytest.raises(AttributeError):
o.d_lat = 0.0 * u.arcsec
with pytest.raises(AttributeError):
del o.d_lat
o = self.SD_cls(1.0 * u.arcsec, 1.0 * u.arcsec, 0.0 * u.km)
with pytest.raises(TypeError):
o.to_cartesian()
c = CartesianRepresentation(10.0, 0.0, 0.0, unit=u.km)
with pytest.raises(TypeError):
self.SD_cls.to_cartesian(c)
with pytest.raises(TypeError):
self.SD_cls.from_cartesian(c)
with pytest.raises(TypeError):
self.SD_cls.from_cartesian(c, SphericalRepresentation)
@pytest.mark.parametrize("omit_coslat", [False, True], scope="class")
class TestUnitSphericalDifferential:
def _setup(self, omit_coslat):
if omit_coslat:
self.USD_cls = UnitSphericalCosLatDifferential
else:
self.USD_cls = UnitSphericalDifferential
s = UnitSphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle, lat=[0.0, -30.0, 85.0] * u.deg
)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors(omit_coslat=omit_coslat)
def test_name_coslat(self, omit_coslat):
self._setup(omit_coslat)
if omit_coslat:
assert self.USD_cls is UnitSphericalCosLatDifferential
assert self.USD_cls.get_name() == "unitsphericalcoslat"
else:
assert self.USD_cls is UnitSphericalDifferential
assert self.USD_cls.get_name() == "unitspherical"
assert self.USD_cls.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self, omit_coslat):
self._setup(omit_coslat)
s, e, sf = self.s, self.e, self.sf
o_lon = self.USD_cls(1.0 * u.arcsec, 0.0 * u.arcsec)
o_lonc = o_lon.to_cartesian(base=s)
o_lon2 = self.USD_cls.from_cartesian(o_lonc, base=s)
assert_differential_allclose(o_lon, o_lon2)
# simple check by hand for first element
# (lat[0]=0, so works for both normal and CosLat differential)
assert_quantity_allclose(
o_lonc[0].xyz, [0.0, np.pi / 180.0 / 3600.0, 0.0] * u.one
)
# check all using unit vectors and scale factors.
s_lon = s + 1.0 * u.arcsec * sf["lon"] * e["lon"]
assert type(s_lon) is SphericalRepresentation
assert_representation_allclose(o_lonc, s_lon - s, atol=1e-10 * u.one)
s_lon2 = s + o_lon
assert_representation_allclose(s_lon2, s_lon, atol=1e-10 * u.one)
o_lat = self.USD_cls(0.0 * u.arcsec, 1.0 * u.arcsec)
o_latc = o_lat.to_cartesian(base=s)
assert_quantity_allclose(
o_latc[0].xyz,
[0.0, 0.0, np.pi / 180.0 / 3600.0] * u.one,
atol=1e-10 * u.one,
)
s_lat = s + 1.0 * u.arcsec * sf["lat"] * e["lat"]
assert type(s_lat) is SphericalRepresentation
assert_representation_allclose(o_latc, s_lat - s, atol=1e-10 * u.one)
s_lat2 = s + o_lat
assert_representation_allclose(s_lat2, s_lat, atol=1e-10 * u.one)
def test_differential_arithmetic(self, omit_coslat):
self._setup(omit_coslat)
s = self.s
o_lon = self.USD_cls(1.0 * u.arcsec, 0.0 * u.arcsec)
o_lon_by_2 = o_lon / 2.0
assert type(o_lon_by_2) is self.USD_cls
assert_representation_allclose(
o_lon_by_2.to_cartesian(s) * 2.0, o_lon.to_cartesian(s), atol=1e-10 * u.one
)
s_lon = s + o_lon
s_lon2 = s + 2 * o_lon_by_2
assert type(s_lon) is SphericalRepresentation
assert_representation_allclose(s_lon, s_lon2, atol=1e-10 * u.one)
o_lon_rec = o_lon_by_2 + o_lon_by_2
assert type(o_lon_rec) is self.USD_cls
assert representation_equal(o_lon, o_lon_rec)
assert_representation_allclose(s + o_lon, s + o_lon_rec, atol=1e-10 * u.one)
o_lon_0 = o_lon - o_lon
assert type(o_lon_0) is self.USD_cls
for c in o_lon_0.components:
assert np.all(getattr(o_lon_0, c) == 0.0)
o_lon2 = self.USD_cls(1.0 * u.mas / u.yr, 0.0 * u.mas / u.yr)
kks = u.km / u.kpc / u.s
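        # km/s per kpc: the natural unit for a 1 mas/yr proper motion on the
        # dimensionless unit sphere (~4.74047 km/s per kpc of distance).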
assert_quantity_allclose(o_lon2.norm(s)[0], 4.74047 * kks, atol=1e-4 * kks)
assert_representation_allclose(
o_lon2.to_cartesian(s) * 1000.0 * u.yr,
o_lon.to_cartesian(s),
atol=1e-10 * u.one,
)
s_off = s + o_lon
s_off2 = s + o_lon2 * 1000.0 * u.yr
assert_representation_allclose(s_off, s_off2, atol=1e-10 * u.one)
factor = 1e5 * u.radian / u.arcsec
if not omit_coslat:
factor = factor / np.cos(s.lat)
s_off_big = s + o_lon * factor
assert_representation_allclose(
s_off_big,
SphericalRepresentation(s.lon + 90.0 * u.deg, 0.0 * u.deg, 1e5),
atol=5.0 * u.one,
)
o_lon3c = CartesianRepresentation(0.0, 4.74047, 0.0, unit=kks)
        # This loses information!
o_lon3 = self.USD_cls.from_cartesian(o_lon3c, base=s)
expected0 = self.USD_cls(1.0 * u.mas / u.yr, 0.0 * u.mas / u.yr)
assert_differential_allclose(o_lon3[0], expected0)
        # Only the part of the motion perpendicular to the line of sight is kept.
part_kept = s.cross(CartesianRepresentation(0, 1, 0, unit=u.one)).norm()
assert_quantity_allclose(
o_lon3.norm(s), 4.74047 * part_kept * kks, atol=1e-10 * kks
)
# (lat[0]=0, so works for both normal and CosLat differential)
s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian / u.mas
expected0 = SphericalRepresentation(90.0 * u.deg, 0.0 * u.deg, 1e5 * u.one)
assert_representation_allclose(s_off_big2[0], expected0, atol=5.0 * u.one)
def test_differential_init_errors(self, omit_coslat):
self._setup(omit_coslat)
with pytest.raises(u.UnitsError):
self.USD_cls(0.0 * u.deg, 10.0 * u.deg / u.yr)
class TestRadialDifferential:
def setup_method(self):
s = SphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle,
lat=[0.0, -30.0, 85.0] * u.deg,
distance=[1, 2, 3] * u.kpc,
)
self.s = s
self.r = s.represent_as(RadialRepresentation)
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert RadialDifferential.get_name() == "radial"
assert RadialDifferential.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
r, s, e, sf = self.r, self.s, self.e, self.sf
o_distance = RadialDifferential(1.0 * u.mpc)
# Can be applied to RadialRepresentation, though not most useful.
r_distance = r + o_distance
assert_quantity_allclose(
r_distance.distance, r.distance + o_distance.d_distance
)
r_distance2 = o_distance + r
assert_quantity_allclose(
r_distance2.distance, r.distance + o_distance.d_distance
)
# More sense to apply it relative to spherical representation.
o_distancec = o_distance.to_cartesian(base=s)
assert_quantity_allclose(
o_distancec[0].xyz, [1e-6, 0.0, 0.0] * u.kpc, atol=1.0 * u.npc
)
o_recover = RadialDifferential.from_cartesian(o_distancec, base=s)
assert_quantity_allclose(o_recover.d_distance, o_distance.d_distance)
s_distance = s + 1.0 * u.mpc * sf["distance"] * e["distance"]
assert_representation_allclose(o_distancec, s_distance - s, atol=1 * u.npc)
s_distance2 = s + o_distance
assert_representation_allclose(s_distance2, s_distance)
class TestPhysicsSphericalDifferential:
"""Test copied from SphericalDifferential, so less extensive."""
def setup_method(self):
s = PhysicsSphericalRepresentation(
phi=[0.0, 90.0, 315.0] * u.deg,
theta=[90.0, 120.0, 5.0] * u.deg,
r=[1, 2, 3] * u.kpc,
)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert PhysicsSphericalDifferential.get_name() == "physicsspherical"
assert PhysicsSphericalDifferential.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
s, e, sf = self.s, self.e, self.sf
o_phi = PhysicsSphericalDifferential(1 * u.arcsec, 0 * u.arcsec, 0 * u.kpc)
o_phic = o_phi.to_cartesian(base=s)
o_phi2 = PhysicsSphericalDifferential.from_cartesian(o_phic, base=s)
assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.0 * u.narcsec)
assert_quantity_allclose(o_phi.d_theta, o_phi2.d_theta, atol=1.0 * u.narcsec)
assert_quantity_allclose(o_phi.d_r, o_phi2.d_r, atol=1.0 * u.npc)
# simple check by hand for first element.
assert_quantity_allclose(
o_phic[0].xyz, [0.0, np.pi / 180.0 / 3600.0, 0.0] * u.kpc, atol=1.0 * u.npc
)
# check all using unit vectors and scale factors.
s_phi = s + 1.0 * u.arcsec * sf["phi"] * e["phi"]
assert_representation_allclose(o_phic, s_phi - s, atol=1e-10 * u.kpc)
o_theta = PhysicsSphericalDifferential(0 * u.arcsec, 1 * u.arcsec, 0 * u.kpc)
o_thetac = o_theta.to_cartesian(base=s)
assert_quantity_allclose(
o_thetac[0].xyz,
[0.0, 0.0, -np.pi / 180.0 / 3600.0] * u.kpc,
atol=1.0 * u.npc,
)
s_theta = s + 1.0 * u.arcsec * sf["theta"] * e["theta"]
assert_representation_allclose(o_thetac, s_theta - s, atol=1e-10 * u.kpc)
s_theta2 = s + o_theta
assert_representation_allclose(s_theta2, s_theta, atol=1e-10 * u.kpc)
o_r = PhysicsSphericalDifferential(0 * u.arcsec, 0 * u.arcsec, 1 * u.mpc)
o_rc = o_r.to_cartesian(base=s)
assert_quantity_allclose(
o_rc[0].xyz, [1e-6, 0.0, 0.0] * u.kpc, atol=1.0 * u.npc
)
s_r = s + 1.0 * u.mpc * sf["r"] * e["r"]
assert_representation_allclose(o_rc, s_r - s, atol=1e-10 * u.kpc)
s_r2 = s + o_r
assert_representation_allclose(s_r2, s_r)
def test_differential_init_errors(self):
with pytest.raises(u.UnitsError):
PhysicsSphericalDifferential(1.0 * u.arcsec, 0.0, 0.0)
class TestCylindricalDifferential:
"""Test copied from SphericalDifferential, so less extensive."""
def setup_method(self):
s = CylindricalRepresentation(
rho=[1, 2, 3] * u.kpc, phi=[0.0, 90.0, 315.0] * u.deg, z=[3, 2, 1] * u.kpc
)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert CylindricalDifferential.get_name() == "cylindrical"
assert CylindricalDifferential.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
s, e, sf = self.s, self.e, self.sf
o_rho = CylindricalDifferential(1.0 * u.mpc, 0.0 * u.arcsec, 0.0 * u.kpc)
o_rhoc = o_rho.to_cartesian(base=s)
assert_quantity_allclose(o_rhoc[0].xyz, [1.0e-6, 0.0, 0.0] * u.kpc)
s_rho = s + 1.0 * u.mpc * sf["rho"] * e["rho"]
assert_representation_allclose(o_rhoc, s_rho - s, atol=1e-10 * u.kpc)
s_rho2 = s + o_rho
assert_representation_allclose(s_rho2, s_rho)
o_phi = CylindricalDifferential(0.0 * u.kpc, 1.0 * u.arcsec, 0.0 * u.kpc)
o_phic = o_phi.to_cartesian(base=s)
o_phi2 = CylindricalDifferential.from_cartesian(o_phic, base=s)
assert_quantity_allclose(o_phi.d_rho, o_phi2.d_rho, atol=1.0 * u.npc)
assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.0 * u.narcsec)
assert_quantity_allclose(o_phi.d_z, o_phi2.d_z, atol=1.0 * u.npc)
# simple check by hand for first element.
assert_quantity_allclose(
o_phic[0].xyz, [0.0, np.pi / 180.0 / 3600.0, 0.0] * u.kpc
)
# check all using unit vectors and scale factors.
s_phi = s + 1.0 * u.arcsec * sf["phi"] * e["phi"]
assert_representation_allclose(o_phic, s_phi - s, atol=1e-10 * u.kpc)
o_z = CylindricalDifferential(0.0 * u.kpc, 0.0 * u.arcsec, 1.0 * u.mpc)
o_zc = o_z.to_cartesian(base=s)
assert_quantity_allclose(o_zc[0].xyz, [0.0, 0.0, 1.0e-6] * u.kpc)
s_z = s + 1.0 * u.mpc * sf["z"] * e["z"]
assert_representation_allclose(o_zc, s_z - s, atol=1e-10 * u.kpc)
s_z2 = s + o_z
assert_representation_allclose(s_z2, s_z)
def test_differential_init_errors(self):
with pytest.raises(u.UnitsError):
CylindricalDifferential(1.0 * u.pc, 1.0 * u.arcsec, 3.0 * u.km / u.s)
class TestCartesianDifferential:
"""Test copied from SphericalDifferential, so less extensive."""
def setup_method(self):
s = CartesianRepresentation(
x=[1, 2, 3] * u.kpc, y=[2, 3, 1] * u.kpc, z=[3, 1, 2] * u.kpc
)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert CartesianDifferential.get_name() == "cartesian"
assert CartesianDifferential.get_name() in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
s, e, sf = self.s, self.e, self.sf
for d, differential in ( # test different inits while we're at it.
("x", CartesianDifferential(1.0 * u.pc, 0.0 * u.pc, 0.0 * u.pc)),
("y", CartesianDifferential([0.0, 1.0, 0.0], unit=u.pc)),
(
"z",
CartesianDifferential(np.array([[0.0, 0.0, 1.0]]) * u.pc, xyz_axis=1),
),
):
o_c = differential.to_cartesian(base=s)
o_c2 = differential.to_cartesian()
assert np.all(representation_equal(o_c, o_c2))
assert all(
np.all(getattr(differential, "d_" + c) == getattr(o_c, c))
for c in ("x", "y", "z")
)
differential2 = CartesianDifferential.from_cartesian(o_c)
assert np.all(representation_equal(differential2, differential))
differential3 = CartesianDifferential.from_cartesian(o_c, base=o_c)
assert np.all(representation_equal(differential3, differential))
s_off = s + 1.0 * u.pc * sf[d] * e[d]
assert_representation_allclose(o_c, s_off - s, atol=1e-10 * u.kpc)
s_off2 = s + differential
assert_representation_allclose(s_off2, s_off)
def test_init_failures(self):
with pytest.raises(ValueError):
CartesianDifferential(1.0 * u.kpc / u.s, 2.0 * u.kpc)
with pytest.raises(u.UnitsError):
CartesianDifferential(1.0 * u.kpc / u.s, 2.0 * u.kpc, 3.0 * u.kpc)
with pytest.raises(ValueError):
CartesianDifferential(1.0 * u.kpc, 2.0 * u.kpc, 3.0 * u.kpc, xyz_axis=1)
class TestDifferentialConversion:
def setup_method(self):
self.s = SphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle,
lat=[0.0, -30.0, 85.0] * u.deg,
distance=[1, 2, 3] * u.kpc,
)
@pytest.mark.parametrize(
"sd_cls", [SphericalDifferential, SphericalCosLatDifferential]
)
def test_represent_as_own_class(self, sd_cls):
so = sd_cls(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc)
so2 = so.represent_as(sd_cls)
assert so2 is so
def test_represent_other_coslat(self):
s = self.s
coslat = np.cos(s.lat)
so = SphericalDifferential(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc)
so_coslat = so.represent_as(SphericalCosLatDifferential, base=s)
assert_quantity_allclose(so.d_lon * coslat, so_coslat.d_lon_coslat)
so2 = so_coslat.represent_as(SphericalDifferential, base=s)
assert np.all(representation_equal(so2, so))
so3 = SphericalDifferential.from_representation(so_coslat, base=s)
assert np.all(representation_equal(so3, so))
so_coslat2 = SphericalCosLatDifferential.from_representation(so, base=s)
assert np.all(representation_equal(so_coslat2, so_coslat))
# Also test UnitSpherical
us = s.represent_as(UnitSphericalRepresentation)
uo = so.represent_as(UnitSphericalDifferential)
uo_coslat = so.represent_as(UnitSphericalCosLatDifferential, base=s)
assert_quantity_allclose(uo.d_lon * coslat, uo_coslat.d_lon_coslat)
uo2 = uo_coslat.represent_as(UnitSphericalDifferential, base=us)
assert np.all(representation_equal(uo2, uo))
uo3 = UnitSphericalDifferential.from_representation(uo_coslat, base=us)
assert np.all(representation_equal(uo3, uo))
uo_coslat2 = UnitSphericalCosLatDifferential.from_representation(uo, base=us)
assert np.all(representation_equal(uo_coslat2, uo_coslat))
uo_coslat3 = uo.represent_as(UnitSphericalCosLatDifferential, base=us)
assert np.all(representation_equal(uo_coslat3, uo_coslat))
@pytest.mark.parametrize(
"sd_cls", [SphericalDifferential, SphericalCosLatDifferential]
)
@pytest.mark.parametrize(
"r_cls",
(
SphericalRepresentation,
UnitSphericalRepresentation,
PhysicsSphericalRepresentation,
CylindricalRepresentation,
),
)
def test_represent_regular_class(self, sd_cls, r_cls):
so = sd_cls(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc)
r = so.represent_as(r_cls, base=self.s)
c = so.to_cartesian(self.s)
r_check = c.represent_as(r_cls)
assert np.all(representation_equal(r, r_check))
so2 = sd_cls.from_representation(r, base=self.s)
so3 = sd_cls.from_cartesian(r.to_cartesian(), self.s)
assert np.all(representation_equal(so2, so3))
@pytest.mark.parametrize(
"sd_cls", [SphericalDifferential, SphericalCosLatDifferential]
)
def test_convert_physics(self, sd_cls):
# Conversion needs no base for SphericalDifferential, but does
# need one (to get the latitude) for SphericalCosLatDifferential.
if sd_cls is SphericalDifferential:
usd_cls = UnitSphericalDifferential
base_s = base_u = base_p = None
else:
usd_cls = UnitSphericalCosLatDifferential
base_s = self.s[1]
base_u = base_s.represent_as(UnitSphericalRepresentation)
base_p = base_s.represent_as(PhysicsSphericalRepresentation)
so = sd_cls(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc)
po = so.represent_as(PhysicsSphericalDifferential, base=base_s)
so2 = sd_cls.from_representation(po, base=base_s)
assert_differential_allclose(so, so2)
po2 = PhysicsSphericalDifferential.from_representation(so, base=base_p)
assert_differential_allclose(po, po2)
so3 = po.represent_as(sd_cls, base=base_p)
assert_differential_allclose(so, so3)
s = self.s
p = s.represent_as(PhysicsSphericalRepresentation)
cso = so.to_cartesian(s[1])
cpo = po.to_cartesian(p[1])
assert_representation_allclose(cso, cpo)
assert_representation_allclose(s[1] + so, p[1] + po)
po2 = so.represent_as(
PhysicsSphericalDifferential, base=None if base_s is None else s
)
assert_representation_allclose(s + so, p + po2)
suo = usd_cls.from_representation(so)
puo = usd_cls.from_representation(po, base=base_u)
assert_differential_allclose(suo, puo)
suo2 = so.represent_as(usd_cls)
puo2 = po.represent_as(usd_cls, base=base_p)
assert_differential_allclose(suo2, puo2)
assert_differential_allclose(puo, puo2)
sro = RadialDifferential.from_representation(so)
pro = RadialDifferential.from_representation(po)
assert representation_equal(sro, pro)
sro2 = so.represent_as(RadialDifferential)
pro2 = po.represent_as(RadialDifferential)
assert representation_equal(sro2, pro2)
assert representation_equal(pro, pro2)
@pytest.mark.parametrize(
("sd_cls", "usd_cls"),
[
(SphericalDifferential, UnitSphericalDifferential),
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential),
],
)
def test_convert_unit_spherical_radial(self, sd_cls, usd_cls):
s = self.s
us = s.represent_as(UnitSphericalRepresentation)
rs = s.represent_as(RadialRepresentation)
assert_representation_allclose(rs * us, s)
uo = usd_cls(2.0 * u.deg, 1.0 * u.deg)
so = uo.represent_as(sd_cls, base=s)
assert_quantity_allclose(so.d_distance, 0.0 * u.kpc, atol=1.0 * u.npc)
uo2 = so.represent_as(usd_cls)
assert_representation_allclose(uo.to_cartesian(us), uo2.to_cartesian(us))
so1 = sd_cls(2.0 * u.deg, 1.0 * u.deg, 5.0 * u.pc)
uo_r = so1.represent_as(usd_cls)
ro_r = so1.represent_as(RadialDifferential)
assert np.all(representation_equal(uo_r, uo))
assert np.all(representation_equal(ro_r, RadialDifferential(5.0 * u.pc)))
@pytest.mark.parametrize(
"sd_cls", [SphericalDifferential, SphericalCosLatDifferential]
)
    def test_convert_cylindrical(self, sd_cls):
s = self.s
so = sd_cls(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc)
cyo = so.represent_as(CylindricalDifferential, base=s)
cy = s.represent_as(CylindricalRepresentation)
so1 = cyo.represent_as(sd_cls, base=cy)
assert_representation_allclose(so.to_cartesian(s), so1.to_cartesian(s))
cyo2 = CylindricalDifferential.from_representation(so, base=cy)
assert_representation_allclose(
cyo2.to_cartesian(base=cy), cyo.to_cartesian(base=cy)
)
so2 = sd_cls.from_representation(cyo2, base=s)
assert_representation_allclose(so.to_cartesian(s), so2.to_cartesian(s))
@pytest.mark.parametrize(
"sd_cls", [SphericalDifferential, SphericalCosLatDifferential]
)
def test_combinations(self, sd_cls):
if sd_cls is SphericalDifferential:
uo = UnitSphericalDifferential(2.0 * u.deg, 1.0 * u.deg)
uo_d_lon = uo.d_lon
else:
uo = UnitSphericalCosLatDifferential(2.0 * u.deg, 1.0 * u.deg)
uo_d_lon = uo.d_lon_coslat
ro = RadialDifferential(1.0 * u.mpc)
so1 = uo + ro
so1c = sd_cls(uo_d_lon, uo.d_lat, ro.d_distance)
assert np.all(representation_equal(so1, so1c))
so2 = uo - ro
so2c = sd_cls(uo_d_lon, uo.d_lat, -ro.d_distance)
assert np.all(representation_equal(so2, so2c))
so3 = so2 + ro
so3c = sd_cls(uo_d_lon, uo.d_lat, 0.0 * u.kpc)
assert np.all(representation_equal(so3, so3c))
so4 = so1 + ro
so4c = sd_cls(uo_d_lon, uo.d_lat, 2 * ro.d_distance)
assert np.all(representation_equal(so4, so4c))
so5 = so1 - uo
so5c = sd_cls(0 * u.deg, 0.0 * u.deg, ro.d_distance)
assert np.all(representation_equal(so5, so5c))
assert_representation_allclose(self.s + (uo + ro), self.s + so1)
@pytest.mark.parametrize(
"op,args",
[
(operator.neg, ()),
(operator.pos, ()),
(operator.mul, (-8.0,)),
(operator.truediv, ([4.0, 8.0] * u.s,)),
],
scope="class",
)
class TestArithmeticWithDifferentials:
def setup_class(self):
self.cr = CartesianRepresentation([1, 2, 3] * u.kpc)
self.cd = CartesianDifferential([0.1, -0.2, 0.3] * u.km / u.s)
self.c = self.cr.with_differentials(self.cd)
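        # self.c is a CartesianRepresentation carrying a CartesianDifferential
        # under the standard "s" (per-second) key; it is the common starting
        # point for all the operation tests below.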
def test_operation_cartesian(self, op, args):
ncr = op(self.c, *args)
expected = (op(self.cr, *args)).with_differentials(op(self.cd, *args))
assert np.all(ncr == expected)
def test_operation_radial(self, op, args):
rep = self.c.represent_as(RadialRepresentation, {"s": RadialDifferential})
result = op(rep, *args)
expected_distance = op(self.cr.norm(), *args)
expected_rv = op((self.cr / self.cr.norm()).dot(self.cd), *args)
assert u.allclose(result.distance, expected_distance)
assert u.allclose(result.differentials["s"].d_distance, expected_rv)
@pytest.mark.parametrize(
"diff_cls",
[
SphericalDifferential,
SphericalCosLatDifferential,
PhysicsSphericalDifferential,
CylindricalDifferential,
],
)
def test_operation_other(self, diff_cls, op, args):
rep_cls = diff_cls.base_representation
rep = self.c.represent_as(rep_cls, {"s": diff_cls})
result = op(rep, *args)
expected_c = op(self.c, *args)
expected = expected_c.represent_as(rep_cls, {"s": diff_cls})
# Check that we match in the representation itself.
assert_representation_allclose(result, expected)
assert_differential_allclose(
result.differentials["s"], expected.differentials["s"]
)
# Check that we compare correctly in cartesian as well, just to be sure.
result_c = result.represent_as(
CartesianRepresentation, {"s": CartesianDifferential}
)
assert_representation_allclose(result_c, expected_c)
assert_differential_allclose(
result_c.differentials["s"], expected_c.differentials["s"]
)
@pytest.mark.parametrize(
"rep_cls",
[
SphericalRepresentation,
PhysicsSphericalRepresentation,
CylindricalRepresentation,
],
)
def test_operation_cartesian_differential(self, rep_cls, op, args):
rep = self.c.represent_as(rep_cls, {"s": CartesianDifferential})
result = op(rep, *args)
expected_c = op(self.c, *args)
expected = expected_c.represent_as(rep_cls, {"s": CartesianDifferential})
# Check that we match in the representation itself.
assert_representation_allclose(result, expected)
assert_differential_allclose(
result.differentials["s"], expected.differentials["s"]
)
@pytest.mark.parametrize(
"diff_cls", [UnitSphericalDifferential, UnitSphericalCosLatDifferential]
)
def test_operation_unit_spherical(self, diff_cls, op, args):
rep_cls = diff_cls.base_representation
rep = self.c.represent_as(rep_cls, {"s": diff_cls})
result = op(rep, *args)
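# Multiplication and division give the angles a scale, so the result is
# promoted to the dimensional representation; unary plus and minus keep
# the unit-spherical class.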
if op not in (operator.neg, operator.pos):
expected_cls = rep._dimensional_representation
else:
expected_cls = rep_cls
assert type(result) is expected_cls
assert type(result.differentials["s"]) is diff_cls
# Information has been lost, so unlike above we convert our initial
# unit-spherical back to Cartesian, and check that applying
# the operation on that cartesian representation gives the same result.
# We do not compare the output directly, since for multiplication
# and division there will be sign flips in the spherical distance.
expected_c = op(
rep.represent_as(CartesianRepresentation, {"s": CartesianDifferential}),
*args
)
result_c = result.represent_as(
CartesianRepresentation, {"s": CartesianDifferential}
)
assert_representation_allclose(result_c, expected_c)
assert_differential_allclose(
result_c.differentials["s"], expected_c.differentials["s"]
)
@pytest.mark.parametrize(
"diff_cls",
[
RadialDifferential,
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
],
)
def test_operation_spherical_with_rv_or_pm(self, diff_cls, op, args):
rep = self.c.represent_as(SphericalRepresentation, {"s": diff_cls})
result = op(rep, *args)
assert type(result) is SphericalRepresentation
assert type(result.differentials["s"]) is diff_cls
expected_c = op(
rep.represent_as(CartesianRepresentation, {"s": CartesianDifferential}),
*args
)
result_c = result.represent_as(
CartesianRepresentation, {"s": CartesianDifferential}
)
assert_representation_allclose(result_c, expected_c)
assert_differential_allclose(
result_c.differentials["s"], expected_c.differentials["s"]
)
@pytest.mark.parametrize("op,args", [(operator.neg, ()), (operator.mul, (10.0,))])
def test_operation_unitspherical_with_rv_fails(op, args):
rep = UnitSphericalRepresentation(
0 * u.deg, 0 * u.deg, differentials={"s": RadialDifferential(10 * u.km / u.s)}
)
with pytest.raises(ValueError, match="unit key"):
op(rep, *args)
@pytest.mark.parametrize(
"rep,dif",
[
[
CartesianRepresentation([1, 2, 3] * u.kpc),
CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s),
],
[
SphericalRepresentation(90 * u.deg, 0.0 * u.deg, 14.0 * u.kpc),
SphericalDifferential(1.0 * u.deg, 2.0 * u.deg, 0.1 * u.kpc),
],
],
)
def test_arithmetic_with_differentials_fail(rep, dif):
rep = rep.with_differentials(dif)
with pytest.raises(TypeError):
rep + rep
with pytest.raises(TypeError):
rep - rep
with pytest.raises(TypeError):
rep * rep
with pytest.raises(TypeError):
rep / rep
|
8409dd79cb804c1678c3fd09d464195e367cc07926b0ab6b03d35dc5ca859337 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import erfa
import numpy as np
import pytest
from numpy import testing as npt
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.coordinates.builtin_frames import ICRS, AltAz
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.time import Time
from astropy.utils import iers
# These fixtures are used in test_iau_fullstack
@pytest.fixture(scope="function")
def fullstack_icrs():
rep = golden_spiral_grid(size=1000)
return ICRS(rep)
@pytest.fixture(scope="function")
def fullstack_fiducial_altaz(fullstack_icrs):
altazframe = AltAz(
location=EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m),
obstime=Time("J2000"),
)
with warnings.catch_warnings(): # Ignore remote_data warning
warnings.simplefilter("ignore")
result = fullstack_icrs.transform_to(altazframe)
return result
@pytest.fixture(scope="function", params=["J2000.1", "J2010"])
def fullstack_times(request):
return Time(request.param)
@pytest.fixture(
scope="function",
params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0), (0, 100, 0), (23, 0, 3000)],
)
def fullstack_locations(request):
value = request.param[0]
return EarthLocation(lat=value * u.deg, lon=value * u.deg, height=value * u.m)
@pytest.fixture(
scope="function",
params=[
(0 * u.bar, 0 * u.deg_C, 0, 1 * u.micron),
(1 * u.bar, 0 * u.deg_C, 0 * u.one, 1 * u.micron),
(1 * u.bar, 10 * u.deg_C, 0, 1 * u.micron),
(1 * u.bar, 0 * u.deg_C, 50 * u.percent, 1 * u.micron),
(1 * u.bar, 0 * u.deg_C, 0, 21 * u.cm),
],
)
def fullstack_obsconditions(request):
return request.param
def _erfa_check(ira, idec, astrom):
"""
This function does the same thing the astropy layer is supposed to do, but
all in erfa
"""
cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom)
az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom)
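# Altitude is the complement of the zenith distance.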
alt = np.pi / 2 - zen
cra2, cdec2 = erfa.atoiq("A", az, zen, astrom)
ira2, idec2 = erfa.aticq(cra2, cdec2, astrom)
dct = locals()
del dct["astrom"]
return dct
def test_iau_fullstack(
fullstack_icrs,
fullstack_fiducial_altaz,
fullstack_times,
fullstack_locations,
fullstack_obsconditions,
):
"""
Test the full transform from ICRS <-> AltAz
"""
# create the altaz frame
altazframe = AltAz(
obstime=fullstack_times,
location=fullstack_locations,
pressure=fullstack_obsconditions[0],
temperature=fullstack_obsconditions[1],
relative_humidity=fullstack_obsconditions[2],
obswl=fullstack_obsconditions[3],
)
aacoo = fullstack_icrs.transform_to(altazframe)
# compare aacoo to the fiducial AltAz - should always be different
assert np.all(
np.abs(aacoo.alt - fullstack_fiducial_altaz.alt) > 50 * u.milliarcsecond
)
assert np.all(
np.abs(aacoo.az - fullstack_fiducial_altaz.az) > 50 * u.milliarcsecond
)
# if the refraction correction is included, we *only* do the comparisons
# where altitude >5 degrees. The SOFA guides imply that below 5 degrees is
# where accuracy gets more problematic, and testing reveals that alt<~0
# gives garbage round-tripping, and <10 can give ~1 arcsec uncertainty
if fullstack_obsconditions[0].value == 0:
# but if there is no refraction correction, check everything
msk = slice(None)
tol = 5 * u.microarcsecond
else:
msk = aacoo.alt > 5 * u.deg
# most of them aren't this bad, but some of those at low alt are offset
# this much. For alt > 10, this is always better than 100 mas
tol = 750 * u.milliarcsecond
# now make sure the full stack round-tripping works
icrs2 = aacoo.transform_to(ICRS())
adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk]
addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk]
assert np.all(
adras < tol
), f"largest RA change is {np.max(adras.arcsec * 1000)} mas, > {tol}"
assert np.all(
addecs < tol
), f"largest Dec change is {np.max(addecs.arcsec * 1000)} mas, > {tol}"
# check that we're consistent with the ERFA alt/az result
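# Build the raw erfa.apco13 inputs: polar motion and site coordinates in
# radians, height in m, pressure in hPa, temperature in deg C, and
# wavelength in microns.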
iers_tab = iers.earth_orientation_table.get()
xp, yp = u.Quantity(iers_tab.pm_xy(fullstack_times)).to_value(u.radian)
lon = fullstack_locations.geodetic[0].to_value(u.radian)
lat = fullstack_locations.geodetic[1].to_value(u.radian)
height = fullstack_locations.geodetic[2].to_value(u.m)
jd1, jd2 = get_jd12(fullstack_times, "utc")
pressure = fullstack_obsconditions[0].to_value(u.hPa)
temperature = fullstack_obsconditions[1].to_value(u.deg_C)
# Relative humidity can be a quantity or a number.
relative_humidity = u.Quantity(fullstack_obsconditions[2], u.one).value
obswl = fullstack_obsconditions[3].to_value(u.micron)
astrom, eo = erfa.apco13(
jd1,
jd2,
fullstack_times.delta_ut1_utc,
lon,
lat,
height,
xp,
yp,
pressure,
temperature,
relative_humidity,
obswl,
)
erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom)
npt.assert_allclose(erfadct["alt"], aacoo.alt.radian, atol=1e-7)
npt.assert_allclose(erfadct["az"], aacoo.az.radian, atol=1e-7)
def test_fiducial_roundtrip(fullstack_icrs, fullstack_fiducial_altaz):
"""
Test the full transform from ICRS <-> AltAz
"""
aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz)
# make sure the round-tripping works
icrs2 = aacoo.transform_to(ICRS())
npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg)
npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg)
def test_future_altaz():
"""
While this does test the full stack, it is mostly meant to check that a
warning is raised when attempting to get to AltAz in the future (beyond
IERS tables)
"""
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from astropy.coordinates.builtin_frames import utils
from astropy.utils.exceptions import AstropyWarning
if hasattr(utils, "__warningregistry__"):
utils.__warningregistry__.clear()
location = EarthLocation(lat=0 * u.deg, lon=0 * u.deg)
t = Time("J2161")
# check that these message(s) appear among any other warnings. If tests are run with
# --remote-data then the IERS table will be an instance of IERS_Auto which is
# assured of being "fresh". In this case getting times outside the range of the
# table does not raise an exception. Only if using IERS_B (which happens without
# --remote-data, i.e. for all CI testing) do we expect another warning.
with pytest.warns(
AstropyWarning,
match=r"Tried to get polar motions for " "times after IERS data is valid.*",
) as found_warnings:
SkyCoord(1 * u.deg, 2 * u.deg).transform_to(AltAz(location=location, obstime=t))
if isinstance(iers.earth_orientation_table.get(), iers.IERS_B):
assert any(
"(some) times are outside of range covered by IERS table." in str(w.message)
for w in found_warnings
)
|
1653ea77c2df794a378f2acaa26112fdb4ca9b39f58d6f8882e8fcf2a5fad441 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test replacements for ERFA functions atciqz and aticq."""
import erfa
import pytest
import astropy.units as u
from astropy.coordinates import SphericalRepresentation
from astropy.coordinates.builtin_frames.utils import atciqz, aticq, get_jd12
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
# Hard-coded random values
sph = SphericalRepresentation(
lon=[15.0, 214.0] * u.deg, lat=[-12.0, 64.0] * u.deg, distance=[1, 1.0]
)
@pytest.mark.parametrize(
"t", [Time("2014-06-25T00:00"), Time(["2014-06-25T00:00", "2014-09-24"])]
)
@pytest.mark.parametrize("pos", [sph[0], sph])
def test_atciqz_aticq(t, pos):
"""Check replacements against erfa versions for consistency."""
jd1, jd2 = get_jd12(t, "tdb")
astrom, _ = erfa.apci13(jd1, jd2)
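# The erfa routines take bare radian arrays, while the astropy
# replacements take a representation; both should agree.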
ra = pos.lon.to_value(u.rad)
dec = pos.lat.to_value(u.rad)
assert_allclose(erfa.atciqz(ra, dec, astrom), atciqz(pos, astrom))
assert_allclose(erfa.aticq(ra, dec, astrom), aticq(pos, astrom))
|
88bd9c97707dad3060027827573eaa285b8c01412e0bc74d35de3671206b9e13 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.coordinates.matrix_utilities import (
angle_axis,
is_O3,
is_rotation,
matrix_product,
rotation_matrix,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_rotation_matrix():
assert_array_equal(rotation_matrix(0 * u.deg, "x"), np.eye(3))
assert_allclose(
rotation_matrix(90 * u.deg, "y"), [[0, 0, -1], [0, 1, 0], [1, 0, 0]], atol=1e-12
)
assert_allclose(
rotation_matrix(-90 * u.deg, "z"),
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
atol=1e-12,
)
assert_allclose(
rotation_matrix(45 * u.deg, "x"), rotation_matrix(45 * u.deg, [1, 0, 0])
)
assert_allclose(
rotation_matrix(125 * u.deg, "y"), rotation_matrix(125 * u.deg, [0, 1, 0])
)
assert_allclose(
rotation_matrix(-30 * u.deg, "z"), rotation_matrix(-30 * u.deg, [0, 0, 1])
)
assert_allclose(
np.dot(rotation_matrix(180 * u.deg, [1, 1, 0]), [1, 0, 0]),
[0, 1, 0],
atol=1e-12,
)
# make sure it also works for very small angles
assert_allclose(
rotation_matrix(0.000001 * u.deg, "x"),
rotation_matrix(0.000001 * u.deg, [1, 0, 0]),
)
def test_angle_axis():
m1 = rotation_matrix(35 * u.deg, "x")
an1, ax1 = angle_axis(m1)
assert np.abs(an1 - 35 * u.deg) < 1e-10 * u.deg
assert_allclose(ax1, [1, 0, 0])
m2 = rotation_matrix(-89 * u.deg, [1, 1, 0])
an2, ax2 = angle_axis(m2)
assert np.abs(an2 - 89 * u.deg) < 1e-10 * u.deg
assert_allclose(ax2, [-(2**-0.5), -(2**-0.5), 0])
def test_is_O3():
"""Test the matrix checker ``is_O3``."""
# Normal rotation matrix
m1 = rotation_matrix(35 * u.deg, "x")
assert is_O3(m1)
# and (M, 3, 3)
n1 = np.tile(m1, (2, 1, 1))
assert tuple(is_O3(n1)) == (True, True) # (show the broadcasting)
# reflection
m2 = m1.copy()
m2[0, 0] *= -1
assert is_O3(m2)
# and (M, 3, 3)
n2 = np.stack((m1, m2))
assert tuple(is_O3(n2)) == (True, True) # (show the broadcasting)
# Not any sort of O(3)
m3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert not is_O3(m3)
# and (M, 3, 3)
n3 = np.stack((m1, m3))
assert tuple(is_O3(n3)) == (True, False) # (show the broadcasting)
def test_is_rotation():
"""Test the rotation matrix checker ``is_rotation``."""
# Normal rotation matrix
m1 = rotation_matrix(35 * u.deg, "x")
assert is_rotation(m1)
assert is_rotation(m1, allow_improper=True) # (a less restrictive test)
# and (M, 3, 3)
n1 = np.tile(m1, (2, 1, 1))
assert tuple(is_rotation(n1)) == (True, True) # (show the broadcasting)
# Improper rotation (unit rotation + reflection)
m2 = np.identity(3)
m2[0, 0] = -1
assert not is_rotation(m2)
assert is_rotation(m2, allow_improper=True)
# and (M, 3, 3)
n2 = np.stack((m1, m2))
assert tuple(is_rotation(n2)) == (True, False) # (show the broadcasting)
# Not any sort of rotation
m3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert not is_rotation(m3)
assert not is_rotation(m3, allow_improper=True)
# and (M, 3, 3)
n3 = np.stack((m1, m3))
assert tuple(is_rotation(n3)) == (True, False) # (show the broadcasting)
def test_matrix_product_deprecation():
with pytest.warns(AstropyDeprecationWarning, match=r"Use @ instead\.$"):
matrix_product(np.eye(2))
|
67cb212b6434d7876d6e99ad4aeac5df10aea6f7295848460c1a3cc5c1ce8bc9 | import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import CIRS, GCRS, AltAz, EarthLocation, SkyCoord
from astropy.coordinates.erfa_astrom import (
ErfaAstrom,
ErfaAstromInterpolator,
erfa_astrom,
)
from astropy.time import Time
from astropy.utils.exceptions import AstropyWarning
def test_science_state():
assert erfa_astrom.get().__class__ is ErfaAstrom
res = 300 * u.s
with erfa_astrom.set(ErfaAstromInterpolator(res)):
assert isinstance(erfa_astrom.get(), ErfaAstromInterpolator)
assert erfa_astrom.get().mjd_resolution == res.to_value(u.day)
# context manager should have switched it back
assert erfa_astrom.get().__class__ is ErfaAstrom
# must be a subclass of BaseErfaAstrom
with pytest.raises(TypeError):
erfa_astrom.set("foo")
def test_warnings():
with pytest.warns(AstropyWarning):
with erfa_astrom.set(ErfaAstromInterpolator(9 * u.us)):
pass
def test_erfa_astrom():
# It is hard to unit-test the astrom provider in isolation without
# simply comparing its implementation against itself,
# so we test a coordinate conversion that uses it instead
location = EarthLocation(
lon=-17.891105 * u.deg,
lat=28.761584 * u.deg,
height=2200 * u.m,
)
obstime = Time("2020-01-01T18:00") + np.linspace(0, 1, 100) * u.hour
altaz = AltAz(location=location, obstime=obstime)
coord = SkyCoord(ra=83.63308333, dec=22.0145, unit=u.deg)
# do the reference transformation, no interpolation
ref = coord.transform_to(altaz)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
interp_300s = coord.transform_to(altaz)
# make sure they are actually different
assert np.any(ref.separation(interp_300s) > 0.005 * u.microarcsecond)
# make sure the resolution is as good as we expect
assert np.all(ref.separation(interp_300s) < 1 * u.microarcsecond)
def test_interpolation_nd():
"""
Test that the interpolation also works for nd-arrays
"""
fact = EarthLocation(
lon=-17.891105 * u.deg,
lat=28.761584 * u.deg,
height=2200 * u.m,
)
interp_provider = ErfaAstromInterpolator(300 * u.s)
provider = ErfaAstrom()
for shape in [tuple(), (1,), (10,), (3, 2), (2, 10, 5), (4, 5, 3, 2)]:
# create obstimes of the desired shapes
delta_t = np.linspace(0, 12, np.prod(shape, dtype=int)) * u.hour
obstime = (Time("2020-01-01T18:00") + delta_t).reshape(shape)
altaz = AltAz(location=fact, obstime=obstime)
gcrs = GCRS(obstime=obstime)
cirs = CIRS(obstime=obstime)
for frame, tcode in zip([altaz, cirs, gcrs], ["apio", "apco", "apcs"]):
without_interp = getattr(provider, tcode)(frame)
assert without_interp.shape == shape
with_interp = getattr(interp_provider, tcode)(frame)
assert with_interp.shape == shape
def test_interpolation_broadcasting():
import astropy.units as u
from astropy.coordinates import AltAz, EarthLocation, SkyCoord
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom
from astropy.time import Time
# 100 gridded locations on the sky
rep = golden_spiral_grid(100)
coord = SkyCoord(rep)
# 30 times spanning 1 hour
times = Time("2020-01-01T20:00") + np.linspace(-0.5, 0.5, 30) * u.hour
lst1 = EarthLocation(
lon=-17.891498 * u.deg,
lat=28.761443 * u.deg,
height=2200 * u.m,
)
# note the use of broadcasting so that the 30 times are broadcast against the 100 positions
aa_frame = AltAz(obstime=times[:, np.newaxis], location=lst1)
aa_coord = coord.transform_to(aa_frame)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
aa_coord_interp = coord.transform_to(aa_frame)
assert aa_coord.shape == aa_coord_interp.shape
assert np.all(aa_coord.separation(aa_coord_interp) < 1 * u.microarcsecond)
|
1db9f58acabf206959f199e5145f720beb1300b1d74fb8b88dcf18d9eaa0df1b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for coordinates-related bugs that don't have an obvious other
place to live
"""
import copy
import io
from contextlib import nullcontext
import numpy as np
import pytest
from erfa import ErfaWarning
from astropy import units as u
from astropy.coordinates import (
CIRS,
FK4,
GCRS,
HCRS,
ICRS,
ITRS,
AltAz,
BaseCoordinateFrame,
CartesianDifferential,
CartesianRepresentation,
CylindricalDifferential,
CylindricalRepresentation,
EarthLocation,
FK4NoETerms,
FunctionTransform,
GeocentricMeanEcliptic,
Latitude,
Longitude,
QuantityAttribute,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
get_body,
get_moon,
get_sun,
)
from astropy.coordinates.sites import get_builtin_sites
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_regression_5085():
"""
PR #5085 was put in place to fix the following issue.
Issue: https://github.com/astropy/astropy/issues/5069
At root was the transformation of Ecliptic coordinates with
non-scalar times.
"""
# Note: for regression test, we need to be sure that we use UTC for the
# epoch, even though more properly that should be TT; but the "expected"
# values were calculated using that.
j2000 = Time("J2000", scale="utc")
times = Time(["2015-08-28 03:30", "2015-09-05 10:30", "2015-09-15 18:35"])
latitudes = Latitude([3.9807075, -5.00733806, 1.69539491] * u.deg)
longitudes = Longitude([311.79678613, 72.86626741, 199.58698226] * u.deg)
distances = u.Quantity([0.00243266, 0.0025424, 0.00271296] * u.au)
coo = GeocentricMeanEcliptic(
lat=latitudes, lon=longitudes, distance=distances, obstime=times, equinox=times
)
# expected result
ras = Longitude([310.50095400, 314.67109920, 319.56507428] * u.deg)
decs = Latitude([-18.25190443, -17.1556676, -15.71616522] * u.deg)
distances = u.Quantity([1.78309901, 1.710874, 1.61326649] * u.au)
expected_result = GCRS(
ra=ras, dec=decs, distance=distances, obstime=j2000
).cartesian.xyz
actual_result = coo.transform_to(GCRS(obstime=j2000)).cartesian.xyz
assert_quantity_allclose(expected_result, actual_result)
def test_regression_3920():
"""
Issue: https://github.com/astropy/astropy/issues/3920
"""
loc = EarthLocation.from_geodetic(0 * u.deg, 0 * u.deg, 0)
time = Time("2010-1-1")
aa = AltAz(location=loc, obstime=time)
sc = SkyCoord(10 * u.deg, 3 * u.deg)
assert sc.transform_to(aa).shape == tuple()
# That part makes sense: the input is a scalar so the output is too
sc2 = SkyCoord(10 * u.deg, 3 * u.deg, 1 * u.AU)
assert sc2.transform_to(aa).shape == tuple()
# in 3920 that assert fails, because the shape is (1,)
# check that the same behavior occurs even if transform is from low-level classes
icoo = ICRS(sc.data)
icoo2 = ICRS(sc2.data)
assert icoo.transform_to(aa).shape == tuple()
assert icoo2.transform_to(aa).shape == tuple()
def test_regression_3938():
"""
Issue: https://github.com/astropy/astropy/issues/3938
"""
# Set up list of targets - we don't use `from_name` here to avoid
# remote_data requirements, but it does the same thing
# vega = SkyCoord.from_name('Vega')
vega = SkyCoord(279.23473479 * u.deg, 38.78368896 * u.deg)
# capella = SkyCoord.from_name('Capella')
capella = SkyCoord(79.17232794 * u.deg, 45.99799147 * u.deg)
# sirius = SkyCoord.from_name('Sirius')
sirius = SkyCoord(101.28715533 * u.deg, -16.71611586 * u.deg)
targets = [vega, capella, sirius]
# Feed list of targets into SkyCoord
combined_coords = SkyCoord(targets)
# Set up AltAz frame
time = Time("2012-01-01 00:00:00")
location = EarthLocation("10d", "45d", 0)
aa = AltAz(location=location, obstime=time)
combined_coords.transform_to(aa)
# in 3938 the above yields ``UnitConversionError: '' (dimensionless) and 'pc' (length) are not convertible``
def test_regression_3998():
"""
Issue: https://github.com/astropy/astropy/issues/3998
"""
time = Time("2012-01-01 00:00:00")
assert time.isscalar
sun = get_sun(time)
assert sun.isscalar
# in 3998, the above yields False - `sun` is a length-1 vector
assert sun.obstime is time
def test_regression_4033():
"""
Issue: https://github.com/astropy/astropy/issues/4033
"""
# alb = SkyCoord.from_name('Albireo')
alb = SkyCoord(292.68033548 * u.deg, 27.95968007 * u.deg)
alb_wdist = SkyCoord(alb, distance=133 * u.pc)
# de = SkyCoord.from_name('Deneb')
de = SkyCoord(310.35797975 * u.deg, 45.28033881 * u.deg)
de_wdist = SkyCoord(de, distance=802 * u.pc)
aa = AltAz(
location=EarthLocation(lat=45 * u.deg, lon=0 * u.deg), obstime="2010-1-1"
)
deaa = de.transform_to(aa)
albaa = alb.transform_to(aa)
alb_wdistaa = alb_wdist.transform_to(aa)
de_wdistaa = de_wdist.transform_to(aa)
# these work fine
sepnod = deaa.separation(albaa)
sepwd = deaa.separation(alb_wdistaa)
assert_quantity_allclose(sepnod, 22.2862 * u.deg, rtol=1e-6)
assert_quantity_allclose(sepwd, 22.2862 * u.deg, rtol=1e-6)
# parallax should be present when distance added
assert np.abs(sepnod - sepwd) > 1 * u.marcsec
# in 4033, the following fail with a recursion error
assert_quantity_allclose(
de_wdistaa.separation(alb_wdistaa), 22.2862 * u.deg, rtol=1e-3
)
assert_quantity_allclose(alb_wdistaa.separation(deaa), 22.2862 * u.deg, rtol=1e-3)
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_4082():
"""
Issue: https://github.com/astropy/astropy/issues/4082
"""
from astropy.coordinates import search_around_3d, search_around_sky
cat = SkyCoord([10.076, 10.00455], [18.54746, 18.54896], unit="deg")
search_around_sky(cat[0:1], cat, seplimit=u.arcsec * 60, storekdtree=False)
# in the issue, this raises a TypeError
# also check 3d for good measure, although it's not really affected by this bug directly
cat3d = SkyCoord(
[10.076, 10.00455] * u.deg,
[18.54746, 18.54896] * u.deg,
distance=[0.1, 1.5] * u.kpc,
)
search_around_3d(cat3d[0:1], cat3d, 1 * u.kpc, storekdtree=False)
def test_regression_4210():
"""
Issue: https://github.com/astropy/astropy/issues/4210
Related PR with actual change: https://github.com/astropy/astropy/pull/4211
"""
crd = SkyCoord(0 * u.deg, 0 * u.deg, distance=1 * u.AU)
ecl = crd.geocentricmeanecliptic
# bug was that "lambda", which at the time was the name of the geocentric
# ecliptic longitude, is a reserved keyword. So this just makes sure the
# new name is valid
ecl.lon
# and for good measure, check the other ecliptic systems are all the same
# names for their attributes
from astropy.coordinates.builtin_frames import ecliptic
for frame_name in ecliptic.__all__:
eclcls = getattr(ecliptic, frame_name)
eclobj = eclcls(1 * u.deg, 2 * u.deg, 3 * u.AU)
eclobj.lat
eclobj.lon
eclobj.distance
def test_regression_futuretimes_4302():
"""
Checks that an error is not raised for future times not covered by IERS
tables (at least in a simple transform like CIRS->ITRS that simply requires
the UTC<->UT1 conversion).
Relevant comment: https://github.com/astropy/astropy/pull/4302#discussion_r44836531
"""
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from astropy.coordinates.builtin_frames import utils
from astropy.utils.exceptions import AstropyWarning
if hasattr(utils, "__warningregistry__"):
utils.__warningregistry__.clear()
# check that out-of-range warning appears among any other warnings. If
# tests are run with --remote-data then the IERS table will be an instance
# of IERS_Auto which is assured of being "fresh". In this case getting
# times outside the range of the table does not raise an exception. Only
# if using IERS_B (which happens without --remote-data, i.e. for all CI
# testing) do we expect another warning.
if isinstance(iers.earth_orientation_table.get(), iers.IERS_B):
ctx = pytest.warns(
AstropyWarning,
match=r"\(some\) times are outside of range covered by IERS table.*",
)
else:
ctx = nullcontext()
with ctx:
future_time = Time("2511-5-1")
c = CIRS(1 * u.deg, 2 * u.deg, obstime=future_time)
c.transform_to(ITRS(obstime=future_time))
def test_regression_4996():
# this part is the actual regression test
deltat = np.linspace(-12, 12, 1000) * u.hour
times = Time("2012-7-13 00:00:00") + deltat
suncoo = get_sun(times)
assert suncoo.shape == (len(times),)
# and this is an additional test to make sure more complex arrays work
times2 = Time("2012-7-13 00:00:00") + deltat.reshape(10, 20, 5)
suncoo2 = get_sun(times2)
assert suncoo2.shape == times2.shape
# this is intentionally not allclose - they should be *exactly* the same
assert np.all(suncoo.ra.ravel() == suncoo2.ra.ravel())
def test_regression_4293():
"""Really just an extra test on FK4 no e, after finding that the units
were not always taken correctly. This test is against explicitly doing
the transformations on pp170 of Explanatory Supplement to the Astronomical
Almanac (Seidelmann, 2005).
See https://github.com/astropy/astropy/pull/4293#issuecomment-234973086
"""
# Check all over sky, but avoiding poles (note that FK4 did not ignore
# e terms within 10° of the poles... see p170 of explan.supp.).
ra, dec = np.meshgrid(np.arange(0, 359, 45), np.arange(-80, 81, 40))
fk4 = FK4(ra.ravel() * u.deg, dec.ravel() * u.deg)
Dc = -0.065838 * u.arcsec
Dd = +0.335299 * u.arcsec
# Dc * tan(obliquity), as given on p.170
Dctano = -0.028553 * u.arcsec
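# Apply the first-order corrections by hand, following p. 170: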
fk4noe_dec = (
fk4.dec
- (Dd * np.cos(fk4.ra) - Dc * np.sin(fk4.ra)) * np.sin(fk4.dec)
- Dctano * np.cos(fk4.dec)
)
fk4noe_ra = fk4.ra - (Dc * np.cos(fk4.ra) + Dd * np.sin(fk4.ra)) / np.cos(fk4.dec)
fk4noe = fk4.transform_to(FK4NoETerms())
# Tolerance here just set to how well the coordinates match, which is much
# better than the claimed accuracy of <1 mas for this first-order in
# v_earth/c approximation.
# Interestingly, if one divides by np.cos(fk4noe_dec) in the ra correction,
# the match becomes good to 2 μas.
assert_quantity_allclose(fk4noe.ra, fk4noe_ra, atol=11.0 * u.uas, rtol=0)
assert_quantity_allclose(fk4noe.dec, fk4noe_dec, atol=3.0 * u.uas, rtol=0)
def test_regression_4926():
times = Time("2010-01-1") + np.arange(20) * u.day
green = get_builtin_sites()["greenwich"]
# this is the regression test
moon = get_moon(times, green)
# this is an additional test to make sure the GCRS->ICRS transform works for complex shapes
moon.transform_to(ICRS())
# and some others to increase coverage of transforms
moon.transform_to(HCRS(obstime="J2000"))
moon.transform_to(HCRS(obstime=times))
def test_regression_5209():
"check that distances are not lost on SkyCoord init"
time = Time("2015-01-01")
moon = get_moon(time)
new_coord = SkyCoord([moon])
assert_quantity_allclose(new_coord[0].distance, moon.distance)
def test_regression_5133():
N = 1000
np.random.seed(12345)
lon = np.random.uniform(-10, 10, N) * u.deg
lat = np.random.uniform(50, 52, N) * u.deg
alt = np.random.uniform(0, 10.0, N) * u.km
time = Time("2010-1-1")
objects = EarthLocation.from_geodetic(lon, lat, height=alt)
itrs_coo = objects.get_itrs(time)
homes = [
EarthLocation.from_geodetic(lon=-1 * u.deg, lat=52 * u.deg, height=h)
for h in (0, 1000, 10000) * u.km
]
altaz_frames = [AltAz(obstime=time, location=h) for h in homes]
altaz_coos = [itrs_coo.transform_to(f) for f in altaz_frames]
# they should all be different
for coo in altaz_coos[1:]:
assert not quantity_allclose(coo.az, coo.az[0])
assert not quantity_allclose(coo.alt, coo.alt[0])
def test_itrs_vals_5133():
"""
Test to check if alt-az calculations respect height of observer
Because ITRS is geocentric and includes aberration, an object that
appears 'straight up' to a geocentric observer (ITRS) won't be
straight up to a topocentric observer - see
https://github.com/astropy/astropy/issues/10983
This is worse for small height above the Earth, which is why this test
uses large distances.
"""
time = Time("2010-1-1")
height = 500000.0 * u.km
el = EarthLocation.from_geodetic(lon=20 * u.deg, lat=45 * u.deg, height=height)
lons = [20, 30, 20] * u.deg
lats = [44, 45, 45] * u.deg
alts = u.Quantity([height, height, 10 * height])
coos = [
EarthLocation.from_geodetic(lon, lat, height=alt).get_itrs(time)
for lon, lat, alt in zip(lons, lats, alts)
]
aaf = AltAz(obstime=time, location=el)
aacs = [coo.transform_to(aaf) for coo in coos]
assert all([coo.isscalar for coo in aacs])
# the ~1 degree tolerance is b/c aberration makes it not exact
assert_quantity_allclose(aacs[0].az, 180 * u.deg, atol=1 * u.deg)
assert aacs[0].alt < 0 * u.deg
assert aacs[0].distance > 5000 * u.km
# it should *not* actually be 90 degrees, b/c constant latitude is not
# straight east anywhere except the equator... but should be close-ish
assert_quantity_allclose(aacs[1].az, 90 * u.deg, atol=5 * u.deg)
assert aacs[1].alt < 0 * u.deg
assert aacs[1].distance > 5000 * u.km
assert_quantity_allclose(aacs[2].alt, 90 * u.deg, atol=1 * u.arcminute)
assert_quantity_allclose(aacs[2].distance, 9 * height)
def test_regression_simple_5133():
"""
Simple test to check if alt-az calculations respect height of observer
Because ITRS is geocentric and includes aberration, an object that
appears 'straight up' to a geocentric observer (ITRS) won't be
straight up to a topocentric observer - see
https://github.com/astropy/astropy/issues/10983
This is why we construct a topocentric GCRS SkyCoord before calculating AltAz
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=[10.0, 0.0] * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=5.0 * u.km)
obsloc_gcrs, obsvel_gcrs = home.get_gcrs_posvel(t)
gcrs_geo = obj.get_itrs(t).transform_to(GCRS(obstime=t))
obsrepr = home.get_itrs(t).transform_to(GCRS(obstime=t)).cartesian
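# Subtract the observer's geocentric position to make the target topocentric.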
topo_gcrs_repr = gcrs_geo.cartesian - obsrepr
topocentric_gcrs_frame = GCRS(
obstime=t, obsgeoloc=obsloc_gcrs, obsgeovel=obsvel_gcrs
)
gcrs_topo = topocentric_gcrs_frame.realize_frame(topo_gcrs_repr)
aa = gcrs_topo.transform_to(AltAz(obstime=t, location=home))
# az is more-or-less undefined for straight up or down
assert_quantity_allclose(aa.alt, [90, -90] * u.deg, rtol=1e-7)
assert_quantity_allclose(aa.distance, 5 * u.km)
def test_regression_5743():
sc = SkyCoord(
[5, 10], [20, 30], unit=u.deg, obstime=["2017-01-01T00:00", "2017-01-01T00:10"]
)
assert sc[0].obstime.shape == tuple()
def test_regression_5889_5890():
# ensure we can represent all Representations and transform to ND frames
greenwich = EarthLocation(
*u.Quantity([3980608.90246817, -102.47522911, 4966861.27310067], unit=u.m)
)
times = Time("2017-03-20T12:00:00") + np.linspace(-2, 2, 3) * u.hour
moon = get_moon(times, location=greenwich)
targets = SkyCoord([350.7 * u.deg, 260.7 * u.deg], [18.4 * u.deg, 22.4 * u.deg])
targs2d = targets[:, np.newaxis]
targs2d.transform_to(moon)
def test_regression_6236():
# sunpy changes its representation upon initialisation of a frame,
# including via `realize_frame`. Ensure this works.
class MyFrame(BaseCoordinateFrame):
default_representation = CartesianRepresentation
my_attr = QuantityAttribute(default=0, unit=u.m)
class MySpecialFrame(MyFrame):
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get("representation_type", None)
super().__init__(*args, **kwargs)
if not _rep_kwarg:
self.representation_type = self.default_representation
self._data = self.data.represent_as(self.representation_type)
rep1 = UnitSphericalRepresentation([0.0, 1] * u.deg, [2.0, 3.0] * u.deg)
rep2 = SphericalRepresentation(
[10.0, 11] * u.deg, [12.0, 13.0] * u.deg, [14.0, 15.0] * u.kpc
)
mf1 = MyFrame(rep1, my_attr=1.0 * u.km)
mf2 = mf1.realize_frame(rep2)
# Normally, data is stored as is, but the representation gets set to a
# default, even if a different representation instance was passed in.
# realize_frame should do the same. Just in case, check attrs are passed.
assert mf1.data is rep1
assert mf2.data is rep2
assert mf1.representation_type is CartesianRepresentation
assert mf2.representation_type is CartesianRepresentation
assert mf2.my_attr == mf1.my_attr
# It should be independent of whether I set the representation explicitly
mf3 = MyFrame(rep1, my_attr=1.0 * u.km, representation_type="unitspherical")
mf4 = mf3.realize_frame(rep2)
assert mf3.data is rep1
assert mf4.data is rep2
assert mf3.representation_type is UnitSphericalRepresentation
assert mf4.representation_type is CartesianRepresentation
assert mf4.my_attr == mf3.my_attr
# This should be enough to help sunpy, but just to be sure, a test
# even closer to what is done there, i.e., transform the representation.
msf1 = MySpecialFrame(rep1, my_attr=1.0 * u.km)
msf2 = msf1.realize_frame(rep2)
assert msf1.data is not rep1 # Gets transformed to Cartesian.
assert msf2.data is not rep2
assert type(msf1.data) is CartesianRepresentation
assert type(msf2.data) is CartesianRepresentation
assert msf1.representation_type is CartesianRepresentation
assert msf2.representation_type is CartesianRepresentation
assert msf2.my_attr == msf1.my_attr
# And finally a test where the input is not transformed.
msf3 = MySpecialFrame(rep1, my_attr=1.0 * u.km, representation_type="unitspherical")
msf4 = msf3.realize_frame(rep2)
assert msf3.data is rep1
assert msf4.data is not rep2
assert msf3.representation_type is UnitSphericalRepresentation
assert msf4.representation_type is CartesianRepresentation
assert msf4.my_attr == msf3.my_attr
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_6347():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = SkyCoord([1.1, 2.1] * u.deg, [3.1, 4.1] * u.deg)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_sky(sc2, 10 * u.arcmin)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_sky(sc2, 1 * u.arcmin)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_sky(sc2, 10 * u.arcmin)
assert len(d2d_10) == 2
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_6347_3d():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, [5, 6] * u.kpc)
sc2 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, [5.1, 6.1] * u.kpc)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_3d(sc2, 500 * u.pc)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_3d(sc2, 50 * u.pc)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_3d(sc2, 500 * u.pc)
assert len(d2d_10) > 0
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
def test_gcrs_itrs_cartesian_repr():
# issue 6436: transformation failed if coordinate representation was
# Cartesian
gcrs = GCRS(
CartesianRepresentation((859.07256, -4137.20368, 5295.56871), unit="km"),
representation_type="cartesian",
)
gcrs.transform_to(ITRS())
def test_regression_6446():
# this succeeds even before 6446:
sc1 = SkyCoord([1, 2], [3, 4], unit="deg")
t1 = Table([sc1])
sio1 = io.StringIO()
t1.write(sio1, format="ascii.ecsv")
# but this fails due to the 6446 bug
c1 = SkyCoord(1, 3, unit="deg")
c2 = SkyCoord(2, 4, unit="deg")
sc2 = SkyCoord([c1, c2])
t2 = Table([sc2])
sio2 = io.StringIO()
t2.write(sio2, format="ascii.ecsv")
assert sio1.getvalue() == sio2.getvalue()
def test_regression_6597():
frame_name = "galactic"
c1 = SkyCoord(1, 3, unit="deg", frame=frame_name)
c2 = SkyCoord(2, 4, unit="deg", frame=frame_name)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame_name
def test_regression_6597_2():
"""
This tests the more subtle flaw that #6597 indirectly uncovered: that even
in the case that the frames are ra/dec, they still might be the wrong *kind*
"""
frame = FK4(equinox="J1949")
c1 = SkyCoord(1, 3, unit="deg", frame=frame)
c2 = SkyCoord(2, 4, unit="deg", frame=frame)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame.name
def test_regression_6697():
"""
Test for regression of a bug in get_gcrs_posvel that introduced errors at the 1m/s level.
Comparison data is derived from calculation in PINT
https://github.com/nanograv/PINT/blob/master/pint/erfautils.py
"""
pint_vels = CartesianRepresentation(
348.63632871, -212.31704928, -0.60154936, unit=u.m / u.s
)
location = EarthLocation(
5327448.9957829, -1718665.73869569, 3051566.90295403, unit=u.m
)
t = Time(2458036.161966612, format="jd")
obsgeopos, obsgeovel = location.get_gcrs_posvel(t)
delta = (obsgeovel - pint_vels).norm()
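# The original bug introduced errors at the ~1 m/s level; require much
# tighter agreement here.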
assert delta < 1 * u.cm / u.s
def test_regression_8138():
sc = SkyCoord(1 * u.deg, 2 * u.deg)
newframe = GCRS()
sc2 = sc.transform_to(newframe)
assert newframe.is_equivalent_frame(sc2.frame)
def test_regression_8276():
from astropy.coordinates import baseframe
class MyFrame(BaseCoordinateFrame):
a = QuantityAttribute(unit=u.m)
# we save the transform graph so that it doesn't accidentally mess with other tests
old_transform_graph = baseframe.frame_transform_graph
try:
baseframe.frame_transform_graph = copy.copy(baseframe.frame_transform_graph)
# as reported in 8276, this previously failed right here because
# registering the transform tries to create a frame attribute
@baseframe.frame_transform_graph.transform(FunctionTransform, MyFrame, AltAz)
def trans(my_frame_coord, altaz_frame):
pass
# should also be able to *create* the Frame at this point
MyFrame()
finally:
baseframe.frame_transform_graph = old_transform_graph
def test_regression_8615():
# note this is a "higher-level" symptom of the problem that a test now moved
# to pyerfa (erfa/tests/test_erfa:test_float32_input) is testing for, but we keep
# it here as well due to being a more practical version of the issue.
crf = CartesianRepresentation(np.array([3, 0, 4], dtype=float) * u.pc)
srf = SphericalRepresentation.from_cartesian(crf) # does not error in 8615
cr = CartesianRepresentation(np.array([3, 0, 4], dtype="f4") * u.pc)
sr = SphericalRepresentation.from_cartesian(cr) # errors in 8615
assert_quantity_allclose(sr.distance, 5 * u.pc)
assert_quantity_allclose(srf.distance, 5 * u.pc)
def test_regression_8924():
"""This checks that the ValueError in
BaseRepresentation._re_represent_differentials is raised properly
"""
# A case where the representation has a 's' differential, but we try to
# re-represent only with an 's2' differential
rep = CartesianRepresentation(1, 2, 3, unit=u.kpc)
dif = CartesianDifferential(4, 5, 6, u.km / u.s)
rep = rep.with_differentials(dif)
with pytest.raises(ValueError):
rep._re_represent_differentials(
CylindricalRepresentation, {"s2": CylindricalDifferential}
)
def test_regression_10092():
"""
Check that we still get a proper motion even for SkyCoords without distance
"""
c = SkyCoord(
l=10 * u.degree,
b=45 * u.degree,
pm_l_cosb=34 * u.mas / u.yr,
pm_b=-117 * u.mas / u.yr,
frame="galactic",
obstime=Time("1988-12-18 05:11:23.5"),
)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
newc = c.apply_space_motion(dt=10 * u.year)
assert_quantity_allclose(
newc.pm_l_cosb, 33.99980714 * u.mas / u.yr, atol=1.0e-5 * u.mas / u.yr
)
def test_regression_10226():
# Dictionary representation of SkyCoord should contain differentials.
sc = SkyCoord(
[270, 280] * u.deg,
[30, 35] * u.deg,
[10, 11] * u.pc,
radial_velocity=[20, -20] * u.km / u.s,
)
sc_as_dict = sc.info._represent_as_dict()
assert "radial_velocity" in sc_as_dict
# But only the components that have been specified.
assert "pm_dec" not in sc_as_dict
@pytest.mark.parametrize(
"mjd", (52000, [52000], [[52000]], [52001, 52002], [[52001], [52002]])
)
def test_regression_10422(mjd):
"""
Check that we can get a GCRS for a scalar EarthLocation and a
size=1 non-scalar Time.
"""
# Avoid trying to download new IERS data.
with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)):
t = Time(mjd, format="mjd", scale="tai")
loc = EarthLocation(88258.0 * u.m, -4924882.2 * u.m, 3943729.0 * u.m)
p, v = loc.get_gcrs_posvel(obstime=t)
assert p.shape == v.shape == t.shape
@pytest.mark.remote_data
def test_regression_10291():
"""
According to https://eclipse.gsfc.nasa.gov/OH/transit12.html,
the minimum separation between Venus and the Sun during the 2012
transit is 554 arcseconds for an observer at the Geocenter.
If light deflection from the Sun is incorrectly applied, this increases
to 557 arcseconds.
"""
t = Time("2012-06-06 01:29:36")
sun = get_body("sun", t)
venus = get_body("venus", t)
assert_quantity_allclose(
venus.separation(sun), 554.427 * u.arcsecond, atol=0.001 * u.arcsecond
)
|
2e9ba76755aea0be2d20fffe84e4ea4431dfe3fe62bed9d45678f46f3300e9ae | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy import testing as npt
from astropy import units as u
from astropy.coordinates import matching
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
"""
These are the tests for coordinate matching.
Note that this requires scipy.
"""
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_matching_function():
from astropy.coordinates import ICRS
from astropy.coordinates.matching import match_coordinates_3d
# this only uses match_coordinates_3d because that's the actual implementation
cmatch = ICRS([4, 2.1] * u.degree, [0, 0] * u.degree)
ccatalog = ICRS([1, 2, 3, 4] * u.degree, [0, 0, 0, 0] * u.degree)
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx, [3, 1])
npt.assert_array_almost_equal(d2d.degree, [0, 0.1])
assert d3d.value[0] == 0
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2)
assert np.all(idx == 2)
npt.assert_array_almost_equal(d2d.degree, [1, 0.9])
npt.assert_array_less(d3d.value, 0.02)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_matching_function_3d_and_sky():
from astropy.coordinates import ICRS
from astropy.coordinates.matching import match_coordinates_3d, match_coordinates_sky
cmatch = ICRS([4, 2.1] * u.degree, [0, 0] * u.degree, distance=[1, 5] * u.kpc)
ccatalog = ICRS(
[1, 2, 3, 4] * u.degree, [0, 0, 0, 0] * u.degree, distance=[1, 1, 1, 5] * u.kpc
)
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx, [2, 3])
assert_allclose(d2d, [1, 1.9] * u.deg)
assert np.abs(d3d[0].to_value(u.kpc) - np.radians(1)) < 1e-6
assert np.abs(d3d[1].to_value(u.kpc) - 5 * np.radians(1.9)) < 1e-5
idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog)
npt.assert_array_equal(idx, [3, 1])
assert_allclose(d2d, [0, 0.1] * u.deg)
assert_allclose(d3d, [4, 4.0000019] * u.kpc)
@pytest.mark.parametrize(
"functocheck, args, defaultkdtname, bothsaved",
[
(matching.match_coordinates_3d, [], "kdtree_3d", False),
(matching.match_coordinates_sky, [], "kdtree_sky", False),
(matching.search_around_3d, [1 * u.kpc], "kdtree_3d", True),
(matching.search_around_sky, [1 * u.deg], "kdtree_sky", False),
],
)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_kdtree_storage(functocheck, args, defaultkdtname, bothsaved):
from astropy.coordinates import ICRS
def make_scs():
cmatch = ICRS([4, 2.1] * u.degree, [0, 0] * u.degree, distance=[1, 2] * u.kpc)
ccatalog = ICRS(
[1, 2, 3, 4] * u.degree,
[0, 0, 0, 0] * u.degree,
distance=[1, 2, 3, 4] * u.kpc,
)
return cmatch, ccatalog
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args, storekdtree=False)
assert "kdtree" not in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args)
assert defaultkdtname in ccatalog.cache
assert "kdtree" not in ccatalog.cache
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args, storekdtree=True)
assert "kdtree" in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
cmatch, ccatalog = make_scs()
assert "tislit_cheese" not in ccatalog.cache
functocheck(cmatch, ccatalog, *args, storekdtree="tislit_cheese")
assert "tislit_cheese" in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
assert "kdtree" not in ccatalog.cache
if bothsaved:
assert "tislit_cheese" in cmatch.cache
assert defaultkdtname not in cmatch.cache
assert "kdtree" not in cmatch.cache
else:
assert "tislit_cheese" not in cmatch.cache
# now a bit of a hacky trick to make sure it at least tries to *use* it
ccatalog.cache["tislit_cheese"] = 1
cmatch.cache["tislit_cheese"] = 1
with pytest.raises(TypeError) as e:
functocheck(cmatch, ccatalog, *args, storekdtree="tislit_cheese")
assert "KD" in e.value.args[0]
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_python_kdtree(monkeypatch):
from astropy.coordinates import ICRS
cmatch = ICRS([4, 2.1] * u.degree, [0, 0] * u.degree, distance=[1, 2] * u.kpc)
ccatalog = ICRS(
[1, 2, 3, 4] * u.degree, [0, 0, 0, 0] * u.degree, distance=[1, 2, 3, 4] * u.kpc
)
monkeypatch.delattr("scipy.spatial.cKDTree")
with pytest.warns(UserWarning, match=r"C-based KD tree not found"):
matching.match_coordinates_sky(cmatch, ccatalog)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_matching_method():
from astropy.coordinates import ICRS, SkyCoord
from astropy.coordinates.matching import match_coordinates_3d, match_coordinates_sky
from astropy.utils import NumpyRNGContext
with NumpyRNGContext(987654321):
cmatch = ICRS(
np.random.rand(20) * 360.0 * u.degree,
(np.random.rand(20) * 180.0 - 90.0) * u.degree,
)
ccatalog = ICRS(
np.random.rand(100) * 360.0 * u.degree,
(np.random.rand(100) * 180.0 - 90.0) * u.degree,
)
idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog)
idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx1, idx2)
assert_allclose(d2d1, d2d2)
assert_allclose(d3d1, d3d2)
# should be the same as above because there's no distance, but just make sure this method works
idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog)
idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog)
npt.assert_array_equal(idx1, idx2)
assert_allclose(d2d1, d2d2)
assert_allclose(d3d1, d3d2)
assert len(idx1) == len(d2d1) == len(d3d1) == 20
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_search_around():
from astropy.coordinates import ICRS, SkyCoord
from astropy.coordinates.matching import search_around_3d, search_around_sky
coo1 = ICRS([4, 2.1] * u.degree, [0, 0] * u.degree, distance=[1, 5] * u.kpc)
coo2 = ICRS(
[1, 2, 3, 4] * u.degree, [0, 0, 0, 0] * u.degree, distance=[1, 1, 1, 5] * u.kpc
)
idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(
coo1, coo2, 1.01 * u.deg
)
idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(
coo1, coo2, 0.05 * u.deg
)
assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)]
assert_allclose(d2d_1deg[0], 1.0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(d2d_1deg, [1, 0, 0.1, 0.9] * u.deg)
assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)]
idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1 * u.kpc)
idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05 * u.kpc)
assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)]
assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)]
assert_allclose(d2d_sm, [2, 1] * u.deg)
# Test for the non-matches, #4877
coo1 = ICRS([4.1, 2.1] * u.degree, [0, 0] * u.degree, distance=[1, 5] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(coo1, coo2, 1 * u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(coo1, coo2, 1 * u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
# Test when one or both of the coordinate arrays is empty, #4875
empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(empty, coo2, 1 * u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_sky(coo1, empty, 1 * u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(empty, empty[:], 1 * u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(empty, coo2, 1 * u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(coo1, empty, 1 * u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(empty, empty[:], 1 * u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
# Test that input without distance units results in a
# 'dimensionless_unscaled' unit
cempty = SkyCoord(ra=[], dec=[], unit=u.deg)
idx1, idx2, d2d, d3d = search_around_3d(cempty, cempty[:], 1 * u.m)
assert d2d.unit == u.deg
assert d3d.unit == u.dimensionless_unscaled
idx1, idx2, d2d, d3d = search_around_sky(cempty, cempty[:], 1 * u.m)
assert d2d.unit == u.deg
assert d3d.unit == u.dimensionless_unscaled
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_search_around_scalar():
from astropy.coordinates import Angle, SkyCoord
cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg")
target = SkyCoord("1.1 -30.1", unit="deg")
with pytest.raises(ValueError) as excinfo:
cat.search_around_sky(target, Angle("2d"))
# make sure the error message is *specific* to search_around_sky rather than
# generic as reported in #3359
assert "search_around_sky" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
cat.search_around_3d(target, Angle("2d"))
assert "search_around_3d" in str(excinfo.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_match_catalog_empty():
from astropy.coordinates import SkyCoord
sc1 = SkyCoord(1, 2, unit="deg")
cat0 = SkyCoord([], [], unit="deg")
cat1 = SkyCoord([1.1], [2.1], unit="deg")
cat2 = SkyCoord([1.1, 3], [2.1, 5], unit="deg")
sc1.match_to_catalog_sky(cat2)
sc1.match_to_catalog_3d(cat2)
sc1.match_to_catalog_sky(cat1)
sc1.match_to_catalog_3d(cat1)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(cat1[0])
assert "catalog" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(cat1[0])
assert "catalog" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(cat0)
assert "catalog" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(cat0)
assert "catalog" in str(excinfo.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
@pytest.mark.filterwarnings(r"ignore:invalid value encountered in.*:RuntimeWarning")
def test_match_catalog_nan():
from astropy.coordinates import Galactic, SkyCoord
sc1 = SkyCoord(1, 2, unit="deg")
sc_with_nans = SkyCoord(1, np.nan, unit="deg")
cat = SkyCoord([1.1, 3], [2.1, 5], unit="deg")
cat_with_nans = SkyCoord([1.1, np.nan], [2.1, 5], unit="deg")
galcat_with_nans = Galactic([1.2, np.nan] * u.deg, [5.6, 7.8] * u.deg)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(cat_with_nans)
assert "Catalog coordinates cannot contain" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(cat_with_nans)
assert "Catalog coordinates cannot contain" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(galcat_with_nans)
assert "Catalog coordinates cannot contain" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(galcat_with_nans)
assert "Catalog coordinates cannot contain" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc_with_nans.match_to_catalog_sky(cat)
assert "Matching coordinates cannot contain" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc_with_nans.match_to_catalog_3d(cat)
assert "Matching coordinates cannot contain" in str(excinfo.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_match_catalog_nounit():
from astropy.coordinates import ICRS, CartesianRepresentation
from astropy.coordinates.matching import match_coordinates_sky
i1 = ICRS([[1], [2], [3]], representation_type=CartesianRepresentation)
i2 = ICRS([[1], [2], [4, 5]], representation_type=CartesianRepresentation)
i, sep, sep3d = match_coordinates_sky(i1, i2)
assert_allclose(sep3d, [1] * u.dimensionless_unscaled)
|
c4b7b45f098a3b0dcf0a5fa9ca7ed03674db8ec121e4dd874babd3704b9aae67 | """Test helper functions for coordinates."""
import numpy as np
def skycoord_equal(sc1, sc2):
"""SkyCoord equality useful for testing"""
if not sc1.is_equivalent_frame(sc2):
return False
if sc1.representation_type is not sc2.representation_type:
return False
if sc1.shape != sc2.shape:
return False # Maybe raise ValueError corresponding to future numpy behavior
eq = np.ones(shape=sc1.shape, dtype=bool)
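# Compare each representation component (e.g. lon, lat, distance) elementwise.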
for comp in sc1.data.components:
eq &= getattr(sc1.data, comp) == getattr(sc2.data, comp)
return np.all(eq)
|
bfe18fa4e59787bf063b0d585c84da7a91add263e4dbe12f5c02f26f9b2711f9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from contextlib import ExitStack
import numpy as np
import pytest
from numpy import testing as npt
from astropy import units as u
from astropy.coordinates import (
FK4,
FK5,
ICRS,
Angle,
CartesianRepresentation,
Galactic,
SkyCoord,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.utils.compat import NUMPY_LT_1_24
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_angle_arrays():
"""
    Test array values with Angle objects.
"""
# Tests incomplete
a1 = Angle([0, 45, 90, 180, 270, 360, 720.0], unit=u.degree)
npt.assert_almost_equal([0.0, 45.0, 90.0, 180.0, 270.0, 360.0, 720.0], a1.value)
a2 = Angle(np.array([-90, -45, 0, 45, 90, 180, 270, 360]), unit=u.degree)
npt.assert_almost_equal([-90, -45, 0, 45, 90, 180, 270, 360], a2.value)
a3 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"])
npt.assert_almost_equal([12.0, 45.0, 5.0, 229.18311805], a3.value)
assert a3.unit == u.degree
a4 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"], u.radian)
npt.assert_almost_equal(a4.degree, a3.value)
assert a4.unit == u.radian
a5 = Angle([0, 45, 90, 180, 270, 360], unit=u.degree)
a6 = a5.sum()
npt.assert_almost_equal(a6.value, 945.0)
assert a6.unit is u.degree
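    # Combining Angle arrays of different lengths is invalid: numpy < 1.24
    # deprecates the automatic object dtype (TypeError + DeprecationWarning),
    # while numpy >= 1.24 raises ValueError outright.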
with ExitStack() as stack:
if NUMPY_LT_1_24:
stack.enter_context(pytest.raises(TypeError))
stack.enter_context(
pytest.warns(
DeprecationWarning, match="automatic object dtype is deprecated"
)
)
else:
stack.enter_context(pytest.raises(ValueError))
a7 = Angle([a1, a2, a3], unit=u.degree)
a8 = Angle(["04:02:02", "03:02:01", "06:02:01"], unit=u.degree)
npt.assert_almost_equal(a8.value, [4.03388889, 3.03361111, 6.03361111])
a9 = Angle(np.array(["04:02:02", "03:02:01", "06:02:01"]), unit=u.degree)
npt.assert_almost_equal(a9.value, a8.value)
with pytest.raises(u.UnitsError):
a10 = Angle(["04:02:02", "03:02:01", "06:02:01"])
def test_dms():
a1 = Angle([0, 45.5, -45.5], unit=u.degree)
d, m, s = a1.dms
npt.assert_almost_equal(d, [0, 45, -45])
npt.assert_almost_equal(m, [0, 30, -30])
npt.assert_almost_equal(s, [0, 0, -0])
def test_hms():
a1 = Angle([0, 11.5, -11.5], unit=u.hour)
h, m, s = a1.hms
npt.assert_almost_equal(h, [0, 11, -11])
npt.assert_almost_equal(m, [0, 30, -30])
npt.assert_almost_equal(s, [0, 0, -0])
hms = a1.hms
hours = hms[0] + hms[1] / 60.0 + hms[2] / 3600.0
npt.assert_almost_equal(a1.hour, hours)
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hours"):
a2 = Angle(hms, unit=u.hour)
npt.assert_almost_equal(a2.radian, a1.radian)
def test_array_coordinates_creation():
"""
Test creating coordinates from arrays.
"""
c = ICRS(np.array([1, 2]) * u.deg, np.array([3, 4]) * u.deg)
assert not c.ra.isscalar
with pytest.raises(ValueError):
c = ICRS(np.array([1, 2]) * u.deg, np.array([3, 4, 5]) * u.deg)
with pytest.raises(ValueError):
c = ICRS(np.array([1, 2, 4, 5]) * u.deg, np.array([[3, 4], [5, 6]]) * u.deg)
# make sure cartesian initialization also works
cart = CartesianRepresentation(
x=[1.0, 2.0] * u.kpc, y=[3.0, 4.0] * u.kpc, z=[5.0, 6.0] * u.kpc
)
c = ICRS(cart)
# also ensure strings can be arrays
c = SkyCoord(["1d0m0s", "2h02m00.3s"], ["3d", "4d"])
# but invalid strings cannot
with pytest.raises(ValueError):
c = SkyCoord(Angle(["10m0s", "2h02m00.3s"]), Angle(["3d", "4d"]))
with pytest.raises(ValueError):
c = SkyCoord(Angle(["1d0m0s", "2h02m00.3s"]), Angle(["3x", "4d"]))
def test_array_coordinates_distances():
"""
Test creating coordinates from arrays and distances.
"""
# correct way
ICRS(
ra=np.array([1, 2]) * u.deg,
dec=np.array([3, 4]) * u.deg,
distance=[0.1, 0.2] * u.kpc,
)
with pytest.raises(ValueError):
# scalar distance and mismatched array coordinates
ICRS(
ra=np.array([1, 2, 3]) * u.deg,
dec=np.array([[3, 4], [5, 6]]) * u.deg,
distance=2.0 * u.kpc,
)
with pytest.raises(ValueError):
# more distance values than coordinates
ICRS(
ra=np.array([1, 2]) * u.deg,
dec=np.array([3, 4]) * u.deg,
distance=[0.1, 0.2, 3.0] * u.kpc,
)
@pytest.mark.parametrize(
("arrshape", "distance"), [((2,), None), ((4, 2, 5), None), ((4, 2, 5), 2 * u.kpc)]
)
def test_array_coordinates_transformations(arrshape, distance):
"""
Test transformation on coordinates with array content (first length-2 1D, then a 3D array)
"""
# M31 coordinates from test_transformations
raarr = np.ones(arrshape) * 10.6847929
decarr = np.ones(arrshape) * 41.2690650
if distance is not None:
distance = np.ones(arrshape) * distance
print(raarr, decarr, distance)
c = ICRS(ra=raarr * u.deg, dec=decarr * u.deg, distance=distance)
g = c.transform_to(Galactic())
assert g.l.shape == arrshape
npt.assert_array_almost_equal(g.l.degree, 121.17440967)
npt.assert_array_almost_equal(g.b.degree, -21.57299631)
if distance is not None:
assert g.distance.unit == c.distance.unit
# now make sure round-tripping works through FK5
c2 = c.transform_to(FK5()).transform_to(ICRS())
npt.assert_array_almost_equal(c.ra.radian, c2.ra.radian)
npt.assert_array_almost_equal(c.dec.radian, c2.dec.radian)
assert c2.ra.shape == arrshape
if distance is not None:
assert c2.distance.unit == c.distance.unit
# also make sure it's possible to get to FK4, which uses a direct transform function.
fk4 = c.transform_to(FK4())
npt.assert_array_almost_equal(fk4.ra.degree, 10.0004, decimal=4)
npt.assert_array_almost_equal(fk4.dec.degree, 40.9953, decimal=4)
assert fk4.ra.shape == arrshape
if distance is not None:
assert fk4.distance.unit == c.distance.unit
# now check the reverse transforms run
cfk4 = fk4.transform_to(ICRS())
assert cfk4.ra.shape == arrshape
def test_array_precession():
"""
Ensures that FK5 coordinates as arrays precess their equinoxes
"""
j2000 = Time("J2000")
j1975 = Time("J1975")
fk5 = FK5([1, 1.1] * u.radian, [0.5, 0.6] * u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK5(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
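    # precessing between J2000 and J1975 should move both components by more
    # than 0.05 degrees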
npt.assert_array_less(0.05, np.abs(fk5.ra.degree - fk5_2.ra.degree))
npt.assert_array_less(0.05, np.abs(fk5.dec.degree - fk5_2.dec.degree))
def test_array_separation():
c1 = ICRS([0, 0] * u.deg, [0, 0] * u.deg)
c2 = ICRS([1, 2] * u.deg, [0, 0] * u.deg)
npt.assert_array_almost_equal(c1.separation(c2).degree, [1, 2])
c3 = ICRS([0, 3.0] * u.deg, [0.0, 0] * u.deg, distance=[1, 1.0] * u.kpc)
c4 = ICRS([1, 1.0] * u.deg, [0.0, 0] * u.deg, distance=[1, 1.0] * u.kpc)
# the 3-1 separation should be twice the 0-1 separation, but not *exactly* the same
sep = c3.separation_3d(c4)
sepdiff = sep[1] - (2 * sep[0])
assert abs(sepdiff.value) < 1e-5
assert sepdiff != 0
def test_array_indexing():
ra = np.linspace(0, 360, 10)
dec = np.linspace(-90, 90, 10)
j1975 = Time(1975, format="jyear")
c1 = FK5(ra * u.deg, dec * u.deg, equinox=j1975)
c2 = c1[4]
assert c2.ra.degree == 160
assert c2.dec.degree == -10
c3 = c1[2:5]
assert_allclose(c3.ra, [80, 120, 160] * u.deg)
assert_allclose(c3.dec, [-50, -30, -10] * u.deg)
c4 = c1[np.array([2, 5, 8])]
assert_allclose(c4.ra, [80, 200, 320] * u.deg)
assert_allclose(c4.dec, [-50, 10, 70] * u.deg)
# now make sure the equinox is preserved
assert c2.equinox == c1.equinox
assert c3.equinox == c1.equinox
assert c4.equinox == c1.equinox
def test_array_len():
input_length = [1, 5]
for length in input_length:
ra = np.linspace(0, 360, length)
dec = np.linspace(0, 90, length)
c = ICRS(ra * u.deg, dec * u.deg)
assert len(c) == length
assert c.shape == (length,)
with pytest.raises(TypeError):
c = ICRS(0 * u.deg, 0 * u.deg)
len(c)
assert c.shape == tuple()
def test_array_eq():
c1 = ICRS([1, 2] * u.deg, [3, 4] * u.deg)
c2 = ICRS([1, 2] * u.deg, [3, 5] * u.deg)
c3 = ICRS([1, 3] * u.deg, [3, 4] * u.deg)
c4 = ICRS([1, 2] * u.deg, [3, 4.2] * u.deg)
assert np.all(c1 == c1)
assert np.any(c1 != c2)
assert np.any(c1 != c3)
assert np.any(c1 != c4)
|
0ac2c23ee06a4fa13fc6fe1603da5a89653ed2c16262c599b777915cd0f7f8be | # These are non-regression tests for PR #13572
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.coordinates import earth_orientation
from astropy.time import Time
@pytest.fixture
def tt_to_test():
return Time("2022-08-25", scale="tt")
@pytest.mark.parametrize(
"algorithm, result",
[(2006, 23.43633313804873), (2000, 23.43634457995851), (1980, 23.436346167704045)],
)
def test_obliquity(tt_to_test, algorithm, result):
assert_allclose(
earth_orientation.obliquity(tt_to_test.jd, algorithm=algorithm),
result,
rtol=1e-13,
)
def test_precession_matrix_Capitaine(tt_to_test):
assert_allclose(
earth_orientation.precession_matrix_Capitaine(
tt_to_test, tt_to_test + 12.345 * u.yr
),
np.array(
[
[9.99995470e-01, -2.76086535e-03, -1.19936388e-03],
[2.76086537e-03, +9.99996189e-01, -1.64025847e-06],
[1.19936384e-03, -1.67103117e-06, +9.99999281e-01],
]
),
rtol=1e-6,
)
def test_nutation_components2000B(tt_to_test):
assert_allclose(
earth_orientation.nutation_components2000B(tt_to_test.jd),
(0.4090413775522035, -5.4418953539440996e-05, 3.176996651841667e-05),
rtol=1e-13,
)
def test_nutation_matrix(tt_to_test):
assert_allclose(
earth_orientation.nutation_matrix(tt_to_test),
np.array(
[
[+9.99999999e-01, +4.99295268e-05, +2.16440489e-05],
[-4.99288392e-05, +9.99999998e-01, -3.17705068e-05],
[-2.16456351e-05, +3.17694261e-05, +9.99999999e-01],
]
),
rtol=1e-6,
)
|
347948aab4f52eabb0a48f2d6efb4ae1134945bb0a13bcfc033f92fd3177a9bf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.coordinates.angles import Angle, Latitude, Longitude
from astropy.coordinates.distances import Distance
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.coordinates.representation import (
DIFFERENTIAL_CLASSES,
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
BaseRepresentation,
CartesianDifferential,
CartesianRepresentation,
CylindricalDifferential,
CylindricalRepresentation,
PhysicsSphericalDifferential,
PhysicsSphericalRepresentation,
RadialDifferential,
RadialRepresentation,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose_quantity
from astropy.utils import isiterable
from astropy.utils.exceptions import DuplicateRepresentationWarning
# create matrices for use in testing ``.transform()`` methods
matrices = {
"rotation": rotation_matrix(-10, "z", u.deg),
"general": np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
}
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass (LogDRepresentation)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def components_equal(rep1, rep2):
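    """Check that two representations are of the same type with exactly equal components."""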
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= getattr(rep1, component) == getattr(rep2, component)
return result
def components_allclose(rep1, rep2):
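    """Check that two representations are of the same type with near-equal components."""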
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= u.allclose(getattr(rep1, component), getattr(rep2, component))
return result
def representation_equal(rep1, rep2):
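    """Check exact equality of representations, including any attached differentials."""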
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, "_differentials", False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_equal(diff1, rep2._differentials[key])
elif getattr(rep2, "_differentials", False):
return False
return result & components_equal(rep1, rep2)
def representation_equal_up_to_angular_type(rep1, rep2):
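    """As representation_equal, but compare component values with allclose."""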
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, "_differentials", False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_allclose(diff1, rep2._differentials[key])
elif getattr(rep2, "_differentials", False):
return False
return result & components_allclose(rep1, rep2)
class TestRadialRepresentation:
def test_transform(self):
"""Test the ``transform`` method. Only multiplication matrices pass."""
rep = RadialRepresentation(distance=10 * u.kpc)
# a rotation matrix does not work
matrix = rotation_matrix(10 * u.deg)
with pytest.raises(ValueError, match="scaled identity matrix"):
rep.transform(matrix)
        # only a scaled identity matrix works
matrix = 3 * np.identity(3)
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
# let's also check with differentials
dif = RadialDifferential(d_distance=-3 * u.km / u.s)
rep = rep.with_differentials(dict(s=dif))
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
assert newrep.differentials["s"].d_distance == -9 * u.km / u.s
class TestSphericalRepresentation:
def test_name(self):
assert SphericalRepresentation.get_name() == "spherical"
assert SphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
assert s3.lon == 8.0 * u.hourangle
assert s3.lat == 5.0 * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_no_mutate_input(self):
lon = -1 * u.hourangle
s = SphericalRepresentation(
lon=lon, lat=-1 * u.deg, distance=1 * u.kpc, copy=True
)
# The longitude component should be wrapped at 24 hours
assert_allclose_quantity(s.lon, 23 * u.hourangle)
# The input should not have been mutated by the constructor
assert_allclose_quantity(lon, -1 * u.hourangle)
def test_init_lonlat(self):
s2 = SphericalRepresentation(
Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc)
)
assert s2.lon == 8.0 * u.hourangle
assert s2.lat == 5.0 * u.deg
assert s2.distance == 10.0 * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(
Longitude(-90, u.degree, wrap_angle=180 * u.degree),
Latitude(-45, u.degree),
Distance(1.0, u.Rsun),
)
assert s3.lon == -90.0 * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_subclass(self):
class Longitude180(Longitude):
_default_wrap_angle = 180 * u.degree
s = SphericalRepresentation(
Longitude180(-90, u.degree), Latitude(-45, u.degree), Distance(1.0, u.Rsun)
)
assert isinstance(s.lon, Longitude180)
assert s.lon == -90.0 * u.degree
assert s.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=[1, 2] * u.kpc
)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1.0, 2.0]), u.degree)
lat = Latitude(np.float32([3.0, 4.0]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values["lon"].dtype == np.float32
assert s1._values["lat"].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8.0 * u.hourangle)
assert_allclose_quantity(s2.lat, 5.0 * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
s3 = SphericalRepresentation(s1)
assert representation_equal(s1, s3)
def test_broadcasting(self):
s1 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=10 * u.kpc
)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(
lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc,
)
assert (
exc.value.args[0]
== "Input parameters lon, lat, and distance cannot be broadcast"
)
def test_broadcasting_and_nocopy(self):
s1 = SphericalRepresentation(
lon=[200] * u.deg, lat=[0] * u.deg, distance=[0] * u.kpc, copy=False
)
# With no copying, we should be able to modify the wrap angle of the longitude component
s1.lon.wrap_angle = 180 * u.deg
s2 = SphericalRepresentation(
lon=[200] * u.deg, lat=0 * u.deg, distance=0 * u.kpc, copy=False
)
# We should be able to modify the wrap angle of the longitude component even if other
        # components need to be broadcast
s2.lon.wrap_angle = 180 * u.deg
def test_readonly(self):
s1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=1.0 * u.kpc
)
with pytest.raises(AttributeError):
s1.lon = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.lat = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.distance = 1.0 * u.kpc
def test_getitem_len_iterable(self):
s = SphericalRepresentation(
lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg, distance=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
assert len(s) == 10
assert isiterable(s)
def test_getitem_len_iterable_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg, distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
with pytest.raises(TypeError):
len(s)
assert not isiterable(s)
def test_setitem(self):
s = SphericalRepresentation(
lon=np.arange(5) * u.deg, lat=-np.arange(5) * u.deg, distance=1 * u.kpc
)
s[:2] = SphericalRepresentation(
lon=10.0 * u.deg, lat=2.0 * u.deg, distance=5.0 * u.kpc
)
assert_allclose_quantity(s.lon, [10, 10, 2, 3, 4] * u.deg)
assert_allclose_quantity(s.lat, [2, 2, -2, -3, -4] * u.deg)
assert_allclose_quantity(s.distance, [5, 5, 1, 1, 1] * u.kpc)
def test_negative_distance(self):
"""Only allowed if explicitly passed on."""
with pytest.raises(ValueError, match="allow_negative"):
SphericalRepresentation(10 * u.deg, 20 * u.deg, -10 * u.m)
s1 = SphericalRepresentation(
10 * u.deg, 20 * u.deg, Distance(-10 * u.m, allow_negative=True)
)
assert s1.distance == -10.0 * u.m
def test_nan_distance(self):
"""This is a regression test: calling represent_as() and passing in the
same class as the object shouldn't round-trip through cartesian.
"""
sph = SphericalRepresentation(1 * u.deg, 2 * u.deg, np.nan * u.kpc)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
dif = SphericalCosLatDifferential(
1 * u.mas / u.yr, 2 * u.mas / u.yr, 3 * u.km / u.s
)
sph = sph.with_differentials(dif)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
def test_raise_on_extra_arguments(self):
with pytest.raises(TypeError, match="got multiple values"):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1.0 * u.kpc, lat=10)
with pytest.raises(TypeError, match="unexpected keyword.*parrot"):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1.0 * u.kpc, parrot=10)
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = SphericalCosLatDifferential(
4 * u.mas / u.yr, 5 * u.mas / u.yr, 6 * u.km / u.s
)
sph = SphericalRepresentation(
1 * u.deg, 2 * u.deg, 3 * u.kpc, differentials={"s": difs}
)
got = sph.represent_as(
PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
assert np.may_share_memory(sph.lon, got.phi)
assert np.may_share_memory(sph.distance, got.r)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
# equal up to angular type
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation, UnitSphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s,
)
s1 = SphericalRepresentation(
lon=[1, 2] * u.deg,
lat=[3, 4] * u.deg,
distance=[5, 6] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
# check differentials. they shouldn't have changed.
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_distance, ds1.d_distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
        # now with a non-rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation, SphericalDifferential)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s,
)
s1 = SphericalRepresentation(
lon=[1, 2] * u.deg,
lat=[3, 4] * u.deg,
distance=[5, np.nan] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# the 2nd component is NaN since the 2nd distance is NaN
# TODO! this will change when ``.transform`` skips Cartesian
assert_array_equal(np.isnan(ds2.d_lon), (False, True))
assert_array_equal(np.isnan(ds2.d_lat), (False, True))
assert_array_equal(np.isnan(ds2.d_distance), (False, True))
        # now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(
SphericalRepresentation, differential_class=SphericalDifferential
)
)
dthruC = thruC.differentials["s"]
        # s3 should not propagate NaN.
assert_array_equal(np.isnan(s3.lon), (False, False))
assert_array_equal(np.isnan(s3.lat), (False, False))
assert_array_equal(np.isnan(s3.distance), (False, True))
        # ds3 does, because there currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_lon), (False, True))
assert_array_equal(np.isnan(ds3.d_lat), (False, True))
assert_array_equal(np.isnan(ds3.d_distance), (False, True))
        # going through Cartesian should propagate the NaN
assert_array_equal(np.isnan(thruC.lon), (False, True))
assert_array_equal(np.isnan(thruC.lat), (False, True))
assert_array_equal(np.isnan(thruC.distance), (False, True))
assert_array_equal(np.isnan(dthruC.d_lon), (False, True))
assert_array_equal(np.isnan(dthruC.d_lat), (False, True))
assert_array_equal(np.isnan(dthruC.d_distance), (False, True))
# test that they are close on the first value
assert_allclose_quantity(s3.lon[0], thruC.lon[0])
assert_allclose_quantity(s3.lat[0], thruC.lat[0])
assert_allclose_quantity(ds3.d_lon[0], dthruC.d_lon[0])
assert_allclose_quantity(ds3.d_lat[0], dthruC.d_lat[0])
class TestUnitSphericalRepresentation:
def test_name(self):
assert UnitSphericalRepresentation.get_name() == "unitspherical"
assert UnitSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = UnitSphericalRepresentation()
def test_init_quantity(self):
s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
assert s3.lon == 8.0 * u.hourangle
assert s3.lat == 5.0 * u.deg
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
def test_init_lonlat(self):
s2 = UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
assert s2.lon == 8.0 * u.hourangle
assert s2.lat == 5.0 * u.deg
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
def test_init_array(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
def test_reprobj(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
s2 = UnitSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8.0 * u.hourangle)
assert_allclose_quantity(s2.lat, 5.0 * u.deg)
s3 = UnitSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = UnitSphericalRepresentation(
lon=[8, 9, 10] * u.hourangle, lat=[5, 6] * u.deg
)
assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast"
def test_readonly(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
with pytest.raises(AttributeError):
s1.lon = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.lat = 1.0 * u.deg
def test_getitem(self):
s = UnitSphericalRepresentation(
lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
def test_getitem_scalar(self):
s = UnitSphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
# TODO! representation transformations with differentials cannot
# (currently) be implemented due to a mismatch between the UnitSpherical
# expected keys (e.g. "s") and that expected in the other class
# (here "s / m"). For more info, see PR #11467
# We leave the test code commented out for future use.
# diffs = UnitSphericalCosLatDifferential(4*u.mas/u.yr, 5*u.mas/u.yr,
# 6*u.km/u.s)
sph = UnitSphericalRepresentation(1 * u.deg, 2 * u.deg)
# , differentials={'s': diffs}
got = sph.represent_as(PhysicsSphericalRepresentation)
# , PhysicsSphericalDifferential)
assert np.may_share_memory(sph.lon, got.phi)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation
) # PhysicsSphericalDifferential
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(SphericalRepresentation)
# , SphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation
) # , SphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
)
s1 = UnitSphericalRepresentation(
lon=[1, 2] * u.deg, lat=[3, 4] * u.deg, differentials=ds1
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = UnitSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
# compare differentials. they should be unchanged (ds1).
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert not hasattr(ds2, "d_distance")
        # now with a non-rotation matrix
# note that the result will be a Spherical, not UnitSpherical
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(
SphericalRepresentation, differential_class=SphericalDifferential
)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
class TestPhysicsSphericalRepresentation:
def test_name(self):
assert PhysicsSphericalRepresentation.get_name() == "physicsspherical"
assert PhysicsSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(
phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc
)
assert s3.phi == 8.0 * u.hourangle
assert s3.theta == 5.0 * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(
Angle(8, u.hour), Angle(5, u.deg), Distance(10, u.kpc)
)
assert s2.phi == 8.0 * u.hourangle
assert s2.theta == 5.0 * u.deg
assert s2.r == 10.0 * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc
)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(
phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc
)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8.0 * u.hourangle)
assert_allclose_quantity(s2.theta, 5.0 * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
s3 = PhysicsSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=10 * u.kpc
)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(
ValueError, match="Input parameters phi, theta, and r cannot be broadcast"
):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9, 10] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc
)
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[10, 20] * u.kpc
)
with pytest.raises(AttributeError):
s1.phi = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.theta = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.r = 1.0 * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(
phi=np.arange(10) * u.deg, theta=np.arange(5, 15) * u.deg, r=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg, theta=2 * u.deg, r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = PhysicsSphericalDifferential(
4 * u.mas / u.yr, 5 * u.mas / u.yr, 6 * u.km / u.s
)
sph = PhysicsSphericalRepresentation(
1 * u.deg, 2 * u.deg, 3 * u.kpc, differentials={"s": difs}
)
got = sph.represent_as(SphericalRepresentation, SphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
assert np.may_share_memory(sph.r, got.distance)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation, SphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation, UnitSphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
def test_initialize_with_nan(self):
# Regression test for gh-11558: initialization used to fail.
psr = PhysicsSphericalRepresentation(
[1.0, np.nan] * u.deg, [np.nan, 2.0] * u.deg, [3.0, np.nan] * u.m
)
assert_array_equal(np.isnan(psr.phi), [False, True])
assert_array_equal(np.isnan(psr.theta), [True, False])
assert_array_equal(np.isnan(psr.r), [False, True])
def test_transform(self):
"""Test ``.transform()`` on rotation and general transform matrices."""
# set up representation
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr,
d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s,
)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg,
theta=[3, 4] * u.deg,
r=[5, 6] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
# compare differentials. should be unchanged (ds1).
assert_allclose_quantity(ds2.d_phi, ds1.d_phi)
assert_allclose_quantity(ds2.d_theta, ds1.d_theta)
assert_allclose_quantity(ds2.d_r, ds1.d_r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
        # now with a non-rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.theta, expected.theta)
assert_allclose_quantity(s3.r, expected.r)
assert_allclose_quantity(ds3.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds3.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds3.d_r, dexpected.d_r)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr,
d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s,
)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg,
theta=[3, 4] * u.deg,
r=[5, np.nan] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
        # now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
)
dthruC = thruC.differentials["s"]
        # s3 should not propagate NaN.
assert_array_equal(np.isnan(s3.phi), (False, False))
assert_array_equal(np.isnan(s3.theta), (False, False))
assert_array_equal(np.isnan(s3.r), (False, True))
        # ds3 does, because there currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_phi), (False, True))
assert_array_equal(np.isnan(ds3.d_theta), (False, True))
assert_array_equal(np.isnan(ds3.d_r), (False, True))
        # going through Cartesian does propagate the NaN
assert_array_equal(np.isnan(thruC.phi), (False, True))
assert_array_equal(np.isnan(thruC.theta), (False, True))
assert_array_equal(np.isnan(thruC.r), (False, True))
# so only test on the first value
assert_allclose_quantity(s3.phi[0], thruC.phi[0])
assert_allclose_quantity(s3.theta[0], thruC.theta[0])
assert_allclose_quantity(ds3.d_phi[0], dthruC.d_phi[0])
assert_allclose_quantity(ds3.d_theta[0], dthruC.d_theta[0])
class TestCartesianRepresentation:
def test_name(self):
assert CartesianRepresentation.get_name() == "cartesian"
assert CartesianRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CartesianRepresentation()
def test_init_quantity(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_singleunit(self):
s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CartesianRepresentation(
x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.Mpc, z=[3, 4, 5] * u.kpc
)
assert s1.x.unit is u.pc
assert s1.y.unit is u.Mpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, [1, 2, 3])
assert_allclose(s1.y.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_one_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.pc
assert s1.z.unit is u.pc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
r = np.arange(27.0).reshape(3, 3, 3) * u.kpc
s2 = CartesianRepresentation(r, xyz_axis=0)
assert s2.shape == (3, 3)
assert s2.x.unit == u.kpc
assert np.all(s2.x == r[0])
assert np.all(s2.xyz == r)
assert np.all(s2.get_xyz(xyz_axis=0) == r)
s3 = CartesianRepresentation(r, xyz_axis=1)
assert s3.shape == (3, 3)
assert np.all(s3.x == r[:, 0])
assert np.all(s3.y == r[:, 1])
assert np.all(s3.z == r[:, 2])
assert np.all(s3.get_xyz(xyz_axis=1) == r)
s4 = CartesianRepresentation(r, xyz_axis=2)
assert s4.shape == (3, 3)
assert np.all(s4.x == r[:, :, 0])
assert np.all(s4.get_xyz(xyz_axis=2) == r)
s5 = CartesianRepresentation(r, unit=u.pc)
assert s5.x.unit == u.pc
assert np.all(s5.xyz == r)
s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2)
assert s6.x.unit == u.pc
assert np.all(s6.get_xyz(xyz_axis=2).value == r.value)
def test_init_one_array_size_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc)
assert exc.value.args[0].startswith("too many values to unpack")
def test_init_xyz_but_more_than_one_array_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(
x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc, z=[3, 4, 5] * u.pc, xyz_axis=0
)
assert "xyz_axis should only be set" in str(exc.value)
def test_init_one_array_yz_fail(self):
with pytest.raises(
ValueError,
match="x, y, and z are required to instantiate CartesianRepresentation",
):
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc)
def test_init_array_nocopy(self):
x = [8, 9, 10] * u.pc
y = [5, 6, 7] * u.Mpc
z = [2, 3, 4] * u.kpc
s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False)
x[:] = [1, 2, 3] * u.kpc
y[:] = [9, 9, 8] * u.kpc
z[:] = [1, 2, 1] * u.kpc
assert_allclose_quantity(x, s1.x)
assert_allclose_quantity(y, s1.y)
assert_allclose_quantity(z, s1.z)
def test_xyz_is_view_if_possible(self):
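        # When built from a single (3, N) array without copying, ``.xyz`` can
        # return a view, so mutating the input is visible in the representation.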
xyz = np.arange(1.0, 10.0).reshape(3, 3)
s1 = CartesianRepresentation(xyz, unit=u.kpc, copy=False)
s1_xyz = s1.xyz
assert s1_xyz.value[0, 0] == 1.0
xyz[0, 0] = 0.0
assert s1.x[0] == 0.0
assert s1_xyz.value[0, 0] == 0.0
# Not possible: we don't check that tuples are from the same array
xyz = np.arange(1.0, 10.0).reshape(3, 3)
s2 = CartesianRepresentation(*xyz, unit=u.kpc, copy=False)
s2_xyz = s2.xyz
assert s2_xyz.value[0, 0] == 1.0
xyz[0, 0] = 0.0
assert s2.x[0] == 0.0
assert s2_xyz.value[0, 0] == 1.0
def test_reprobj(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
s2 = CartesianRepresentation.from_representation(s1)
assert s2.x == 1 * u.kpc
assert s2.y == 2 * u.kpc
assert s2.z == 3 * u.kpc
s3 = CartesianRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc)
assert s1.x.unit == u.kpc
assert s1.y.unit == u.kpc
assert s1.z.unit == u.kpc
assert_allclose(s1.x.value, [1, 2])
assert_allclose(s1.y.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc
)
assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast"
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.x = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.y = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.z = 1.0 * u.kpc
def test_xyz(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert isinstance(s1.xyz, u.Quantity)
assert s1.xyz.unit is u.kpc
assert_allclose(s1.xyz.value, [1, 2, 3])
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.km)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
def test_unit_non_length(self):
s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg)
s2 = CartesianRepresentation(
x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s
)
banana = u.def_unit("banana")
s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana)
def test_getitem(self):
s = CartesianRepresentation(
x=np.arange(10) * u.m, y=-np.arange(10) * u.m, z=3 * u.km
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
def test_getitem_scalar(self):
s = CartesianRepresentation(x=1 * u.m, y=-2 * u.m, z=3 * u.km)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
ds1 = CartesianDifferential(
d_x=[1, 2] * u.km / u.s, d_y=[3, 4] * u.km / u.s, d_z=[5, 6] * u.km / u.s
)
s1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc, differentials=ds1
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["general"])
ds2 = s2.differentials["s"]
dexpected = CartesianDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["general"]), base=s2
)
assert_allclose_quantity(ds2.d_x, dexpected.d_x)
assert_allclose_quantity(ds2.d_y, dexpected.d_y)
assert_allclose_quantity(ds2.d_z, dexpected.d_z)
# also explicitly calculate, since we can
assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert_allclose(ds2.d_x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(ds2.d_y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(ds2.d_z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert s2.x.unit is u.kpc
assert s2.y.unit is u.kpc
assert s2.z.unit is u.kpc
assert ds2.d_x.unit == u.km / u.s
assert ds2.d_y.unit == u.km / u.s
assert ds2.d_z.unit == u.km / u.s
class TestCylindricalRepresentation:
def test_name(self):
assert CylindricalRepresentation.get_name() == "cylindrical"
assert CylindricalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CylindricalRepresentation()
def test_init_quantity(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
assert s1.rho.unit is u.kpc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, 1)
assert_allclose(s1.phi.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CylindricalRepresentation(
rho=[1, 2, 3] * u.pc, phi=[2, 3, 4] * u.deg, z=[3, 4, 5] * u.kpc
)
assert s1.rho.unit is u.pc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, [1, 2, 3])
assert_allclose(s1.phi.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_array_nocopy(self):
rho = [8, 9, 10] * u.pc
phi = [5, 6, 7] * u.deg
z = [2, 3, 4] * u.kpc
s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False)
rho[:] = [9, 2, 3] * u.kpc
phi[:] = [1, 2, 3] * u.arcmin
z[:] = [-2, 3, 8] * u.kpc
assert_allclose_quantity(rho, s1.rho)
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
s2 = CylindricalRepresentation.from_representation(s1)
assert s2.rho == 1 * u.kpc
assert s2.phi == 2 * u.deg
assert s2.z == 3 * u.kpc
s3 = CylindricalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CylindricalRepresentation(
rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc
)
assert s1.rho.unit == u.kpc
assert s1.phi.unit == u.deg
assert s1.z.unit == u.kpc
assert_allclose(s1.rho.value, [1, 2])
assert_allclose(s1.phi.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(
ValueError, match="Input parameters rho, phi, and z cannot be broadcast"
):
s1 = CylindricalRepresentation(
rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc
)
def test_readonly(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=20 * u.deg, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.rho = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.phi = 20 * u.deg
with pytest.raises(AttributeError):
s1.z = 1.0 * u.kpc
    def test_unit_mismatch(self):
q_len = u.Quantity([1], u.kpc)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len)
assert exc.value.args[0] == "rho and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen)
assert exc.value.args[0] == "rho and z should have matching physical types"
def test_getitem(self):
s = CylindricalRepresentation(
rho=np.arange(10) * u.pc, phi=-np.arange(10) * u.deg, z=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc)
assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = CylindricalRepresentation(rho=1 * u.pc, phi=-2 * u.deg, z=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
s1 = CylindricalRepresentation(
phi=[1, 2] * u.deg, z=[3, 4] * u.pc, rho=[5, 6] * u.kpc
)
s2 = s1.transform(matrices["rotation"])
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.z, s1.z)
assert_allclose_quantity(s2.rho, s1.rho)
assert s2.phi.unit is u.rad
assert s2.z.unit is u.kpc
assert s2.rho.unit is u.kpc
        # now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
expected = (s1.to_cartesian().transform(matrices["general"])).represent_as(
CylindricalRepresentation
)
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.z, expected.z)
assert_allclose_quantity(s3.rho, expected.rho)
class TestUnitSphericalCosLatDifferential:
@pytest.mark.parametrize("matrix", list(matrices.values()))
def test_transform(self, matrix):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalCosLatDifferential(
d_lon_coslat=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
)
s1 = UnitSphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg)
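        # Differentials transform relative to a base, so both the original and
        # the transformed representation are passed in.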
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrix)
ds2 = ds1.transform(matrix, s1, s2)
dexpected = UnitSphericalCosLatDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrix), base=s2
)
assert_allclose_quantity(ds2.d_lon_coslat, dexpected.d_lon_coslat)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
def test_cartesian_spherical_roundtrip():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s2 = SphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = SphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.lon, s4.lon)
assert_allclose_quantity(s2.lat, s4.lat)
assert_allclose_quantity(s2.distance, s4.distance)
def test_cartesian_setting_with_other():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s1[0] = SphericalRepresentation(0.0 * u.deg, 0.0 * u.deg, 1 * u.kpc)
assert_allclose_quantity(s1.x, [1.0, 2000.0] * u.kpc)
assert_allclose_quantity(s1.y, [0.0, 4.0] * u.pc)
assert_allclose_quantity(s1.z, [0.0, 6000.0] * u.pc)
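    # A UnitSphericalRepresentation has no distance, so assigning it into a
    # Cartesian representation would lose information and must raise.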
with pytest.raises(ValueError, match="loss of information"):
s1[1] = UnitSphericalRepresentation(0.0 * u.deg, 10.0 * u.deg)
def test_cartesian_physics_spherical_roundtrip():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
def test_spherical_physics_spherical_roundtrip():
s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s3.lon)
assert_allclose_quantity(s1.lat, s3.lat)
assert_allclose_quantity(s1.distance, s3.distance)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
assert_allclose_quantity(s1.lon, s4.phi)
assert_allclose_quantity(s1.lat, 90.0 * u.deg - s4.theta)
assert_allclose_quantity(s1.distance, s4.r)
def test_cartesian_cylindrical_roundtrip():
s1 = CartesianRepresentation(
x=np.array([1.0, 2000.0]) * u.kpc,
y=np.array([3000.0, 4.0]) * u.pc,
z=np.array([5.0, 600.0]) * u.cm,
)
s2 = CylindricalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = CylindricalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.rho, s4.rho)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.z, s4.z)
def test_unit_spherical_roundtrip():
s1 = UnitSphericalRepresentation(
lon=[10.0, 30.0] * u.deg, lat=[5.0, 6.0] * u.arcmin
)
s2 = CartesianRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = UnitSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s4.lon)
assert_allclose_quantity(s1.lat, s4.lat)
def test_no_unnecessary_copies():
s1 = UnitSphericalRepresentation(
lon=[10.0, 30.0] * u.deg, lat=[5.0, 6.0] * u.arcmin
)
s2 = s1.represent_as(UnitSphericalRepresentation)
assert s2 is s1
assert np.may_share_memory(s1.lon, s2.lon)
assert np.may_share_memory(s1.lat, s2.lat)
s3 = s1.represent_as(SphericalRepresentation)
assert np.may_share_memory(s1.lon, s3.lon)
assert np.may_share_memory(s1.lat, s3.lat)
s4 = s1.represent_as(CartesianRepresentation)
s5 = s4.represent_as(CylindricalRepresentation)
assert np.may_share_memory(s5.z, s4.z)
def test_representation_repr():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert (
repr(r1) == "<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n"
" (1., 2.5, 1.)>"
)
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert repr(r2) == "<CartesianRepresentation (x, y, z) in kpc\n (1., 2., 3.)>"
r3 = CartesianRepresentation(
x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc
)
assert (
repr(r3) == "<CartesianRepresentation (x, y, z) in kpc\n"
" [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>"
)
def test_representation_repr_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit="m")
assert (
repr(cr) == "<CartesianRepresentation (x, y, z) in m\n"
" [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n"
" [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n"
" [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>"
)
# This was broken before.
assert (
repr(cr.T) == "<CartesianRepresentation (x, y, z) in m\n"
" [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n"
" [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n"
" [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>"
)
def test_representation_str():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert str(r1) == "(1., 2.5, 1.) (deg, deg, kpc)"
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert str(r2) == "(1., 2., 3.) kpc"
r3 = CartesianRepresentation(
x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc
)
assert str(r3) == "[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc"
def test_representation_str_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit="m")
assert (
str(cr) == "[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n"
" [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n"
" [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m"
)
# This was broken before.
assert (
str(cr.T) == "[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n"
" [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n"
" [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m"
)
def test_subclass_representation():
from astropy.coordinates.builtin_frames import ICRS
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(
cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs
)
return self
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude, "distance": u.Quantity}
class ICRSWrap180(ICRS):
frame_specific_representation_info = (
ICRS._frame_specific_representation_info.copy()
)
frame_specific_representation_info[
SphericalWrap180Representation
] = frame_specific_representation_info[SphericalRepresentation]
default_representation = SphericalWrap180Representation
c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m)
assert c.ra.value == -1
assert c.ra.unit is u.deg
assert c.dec.value == -2
assert c.dec.unit is u.deg
def test_minimal_subclass():
# Basically a check that what we document works;
# see doc/coordinates/representations.rst
class LogDRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude, "logd": u.Dex}
def to_cartesian(self):
d = self.logd.physical
x = d * np.cos(self.lat) * np.cos(self.lon)
y = d * np.cos(self.lat) * np.sin(self.lon)
z = d * np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False)
ld1 = LogDRepresentation(90.0 * u.deg, 0.0 * u.deg, 1.0 * u.dex(u.kpc))
ld2 = LogDRepresentation(lon=90.0 * u.deg, lat=0.0 * u.deg, logd=1.0 * u.dex(u.kpc))
assert np.all(ld1.lon == ld2.lon)
assert np.all(ld1.lat == ld2.lat)
assert np.all(ld1.logd == ld2.logd)
c = ld1.to_cartesian()
assert_allclose_quantity(c.xyz, [0.0, 10.0, 0.0] * u.kpc, atol=1.0 * u.npc)
ld3 = LogDRepresentation.from_cartesian(c)
assert np.all(ld3.lon == ld2.lon)
assert np.all(ld3.lat == ld2.lat)
assert np.all(ld3.logd == ld2.logd)
s = ld1.represent_as(SphericalRepresentation)
assert_allclose_quantity(s.lon, ld1.lon)
assert_allclose_quantity(s.distance, 10.0 * u.kpc)
assert_allclose_quantity(s.lat, ld1.lat)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg)
with pytest.raises(TypeError):
LogDRepresentation(
0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), lon=1.0 * u.deg
)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), True, False)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), foo="bar")
# if we define it a second time, even the qualnames are the same,
# so we raise
with pytest.raises(ValueError):
class LogDRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude, "logr": u.Dex}
def test_duplicate_warning():
from astropy.coordinates.representation import (
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
)
with pytest.warns(DuplicateRepresentationWarning):
class UnitSphericalRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude}
assert "unitspherical" in DUPLICATE_REPRESENTATIONS
assert "unitspherical" not in REPRESENTATION_CLASSES
assert (
"astropy.coordinates.representation.UnitSphericalRepresentation"
in REPRESENTATION_CLASSES
)
assert (
__name__ + ".test_duplicate_warning.<locals>.UnitSphericalRepresentation"
in REPRESENTATION_CLASSES
)
class TestCartesianRepresentationWithDifferential:
def test_init_differential(self):
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
# Check that a single differential gets turned into a 1-item dict.
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# can also pass in an explicit dictionary
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"s": diff}
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# using the wrong key will cause it to fail
with pytest.raises(ValueError):
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"1 / s2": diff}
)
# make sure other kwargs are handled properly
s1 = CartesianRepresentation(
x=1, y=2, z=3, differentials=diff, copy=False, unit=u.kpc
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
with pytest.raises(TypeError): # invalid type passed to differentials
CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials="garmonbozia"
)
# And that one can add it to another representation.
s1 = CartesianRepresentation(
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc),
differentials=diff,
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# make sure differentials can't accept differentials
with pytest.raises(TypeError):
CartesianDifferential(
d_x=1 * u.km / u.s,
d_y=2 * u.km / u.s,
d_z=3 * u.km / u.s,
differentials=diff,
)
def test_init_differential_compatible(self):
# TODO: more extensive checking of this
# should fail - representation and differential not compatible
diff = SphericalDifferential(
d_lon=1 * u.mas / u.yr, d_lat=2 * u.mas / u.yr, d_distance=3 * u.km / u.s
)
with pytest.raises(TypeError):
CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(
d_lon_coslat=1 * u.mas / u.yr,
d_lat=2 * u.mas / u.yr,
d_distance=3 * u.km / u.s,
)
r1 = SphericalRepresentation(
lon=15 * u.deg, lat=21 * u.deg, distance=1 * u.pc, differentials=diff
)
def test_init_differential_multiple_equivalent_keys(self):
d1 = CartesianDifferential(*[1, 2, 3] * u.km / u.s)
d2 = CartesianDifferential(*[4, 5, 6] * u.km / u.s)
# verify that the expected_unit check rejects passing in two different
# but equivalent keys
with pytest.raises(ValueError):
r1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"s": d1, "yr": d2}
)
def test_init_array_broadcasting(self):
arr1 = np.arange(8).reshape(4, 2) * u.km / u.s
diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1)
# shapes aren't compatible
arr2 = np.arange(27).reshape(3, 9) * u.kpc
with pytest.raises(ValueError):
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, differentials=diff)
arr2 = np.arange(8).reshape(4, 2) * u.kpc
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, differentials=diff)
assert rep.x.unit is u.kpc
assert rep.y.unit is u.kpc
assert rep.z.unit is u.kpc
assert len(rep.differentials) == 1
assert rep.differentials["s"] is diff
assert rep.xyz.shape == rep.differentials["s"].d_xyz.shape
def test_reprobj(self):
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(
d_lon_coslat=1 * u.mas / u.yr,
d_lat=2 * u.mas / u.yr,
d_distance=3 * u.km / u.s,
)
r1 = SphericalRepresentation(
lon=15 * u.deg, lat=21 * u.deg, distance=1 * u.pc, differentials=diff
)
r2 = CartesianRepresentation.from_representation(r1)
assert r2.get_name() == "cartesian"
assert not r2.differentials
r3 = SphericalRepresentation(r1)
assert r3.differentials
assert representation_equal(r3, r1)
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError): # attribute is not settable
s1.differentials = "thing"
def test_represent_as(self):
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
rep1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
# Only change the representation, drop the differential
new_rep = rep1.represent_as(SphericalRepresentation)
assert new_rep.get_name() == "spherical"
assert not new_rep.differentials # dropped
# Pass in separate classes for representation, differential
new_rep = rep1.represent_as(
SphericalRepresentation, SphericalCosLatDifferential
)
assert new_rep.get_name() == "spherical"
assert new_rep.differentials["s"].get_name() == "sphericalcoslat"
# Pass in a dictionary for the differential classes
new_rep = rep1.represent_as(
SphericalRepresentation, {"s": SphericalCosLatDifferential}
)
assert new_rep.get_name() == "spherical"
assert new_rep.differentials["s"].get_name() == "sphericalcoslat"
# make sure represent_as() passes through the differentials
for name in REPRESENTATION_CLASSES:
if name == "radial":
# TODO: Converting a CartesianDifferential to a
# RadialDifferential fails, even on `main`
continue
elif name.endswith("geodetic"):
# TODO: Geodetic representations do not have differentials yet
continue
new_rep = rep1.represent_as(
REPRESENTATION_CLASSES[name], DIFFERENTIAL_CLASSES[name]
)
assert new_rep.get_name() == name
assert len(new_rep.differentials) == 1
assert new_rep.differentials["s"].get_name() == name
with pytest.raises(ValueError) as excinfo:
rep1.represent_as("name")
assert "use frame object" in str(excinfo.value)
@pytest.mark.parametrize(
"sph_diff,usph_diff",
[
(SphericalDifferential, UnitSphericalDifferential),
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential),
],
)
def test_represent_as_unit_spherical_with_diff(self, sph_diff, usph_diff):
"""Test that differential angles are correctly reduced."""
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
rep = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
sph = rep.represent_as(SphericalRepresentation, sph_diff)
usph = rep.represent_as(UnitSphericalRepresentation, usph_diff)
assert components_equal(usph, sph.represent_as(UnitSphericalRepresentation))
assert components_equal(
usph.differentials["s"], sph.differentials["s"].represent_as(usph_diff)
)
# Just to be sure components_equal and the represent_as work as advertised,
# a sanity check: d_lat is always defined and should be the same.
assert_array_equal(sph.differentials["s"].d_lat, usph.differentials["s"].d_lat)
def test_getitem(self):
d = CartesianDifferential(
d_x=np.arange(10) * u.m / u.s,
d_y=-np.arange(10) * u.m / u.s,
d_z=1.0 * u.m / u.s,
)
s = CartesianRepresentation(
x=np.arange(10) * u.m, y=-np.arange(10) * u.m, z=3 * u.km, differentials=d
)
s_slc = s[2:8:2]
s_dif = s_slc.differentials["s"]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m / u.s)
assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m / u.s)
assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m / u.s)
def test_setitem(self):
d = CartesianDifferential(
d_x=np.arange(5) * u.m / u.s,
d_y=-np.arange(5) * u.m / u.s,
d_z=1.0 * u.m / u.s,
)
s = CartesianRepresentation(
x=np.arange(5) * u.m, y=-np.arange(5) * u.m, z=3 * u.km, differentials=d
)
s[:2] = s[2]
assert_array_equal(s.x, [2, 2, 2, 3, 4] * u.m)
assert_array_equal(s.y, [-2, -2, -2, -3, -4] * u.m)
assert_array_equal(s.z, [3, 3, 3, 3, 3] * u.km)
assert_array_equal(s.differentials["s"].d_x, [2, 2, 2, 3, 4] * u.m / u.s)
assert_array_equal(s.differentials["s"].d_y, [-2, -2, -2, -3, -4] * u.m / u.s)
assert_array_equal(s.differentials["s"].d_z, [1, 1, 1, 1, 1] * u.m / u.s)
s2 = s.represent_as(SphericalRepresentation, SphericalDifferential)
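# assigning an item from an equivalent representation/differential pair
# should convert it back to the target's own classes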
s[0] = s2[3]
assert_allclose_quantity(s.x, [3, 2, 2, 3, 4] * u.m)
assert_allclose_quantity(s.y, [-3, -2, -2, -3, -4] * u.m)
assert_allclose_quantity(s.z, [3, 3, 3, 3, 3] * u.km)
assert_allclose_quantity(s.differentials["s"].d_x, [3, 2, 2, 3, 4] * u.m / u.s)
assert_allclose_quantity(
s.differentials["s"].d_y, [-3, -2, -2, -3, -4] * u.m / u.s
)
assert_allclose_quantity(s.differentials["s"].d_z, [1, 1, 1, 1, 1] * u.m / u.s)
s3 = CartesianRepresentation(
s.xyz,
differentials={
"s": d,
"s2": CartesianDifferential(np.ones((3, 5)) * u.m / u.s**2),
},
)
with pytest.raises(ValueError, match="same differentials"):
s[0] = s3[2]
s4 = SphericalRepresentation(
0.0 * u.deg,
0.0 * u.deg,
1.0 * u.kpc,
differentials=RadialDifferential(10 * u.km / u.s),
)
with pytest.raises(ValueError, match="loss of information"):
s[0] = s4
def test_transform(self):
d1 = CartesianDifferential(
d_x=[1, 2] * u.km / u.s, d_y=[3, 4] * u.km / u.s, d_z=[5, 6] * u.km / u.s
)
r1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc, differentials=d1
)
r2 = r1.transform(matrices["general"])
d2 = r2.differentials["s"]
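# the expected values below are consistent with matrices["general"] (defined
# earlier in this file) being the 3x3 matrix [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# applied to each differential vector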
assert_allclose_quantity(d2.d_x, [22.0, 28] * u.km / u.s)
assert_allclose_quantity(d2.d_y, [49, 64] * u.km / u.s)
assert_allclose_quantity(d2.d_z, [76, 100.0] * u.km / u.s)
def test_with_differentials(self):
# make sure with_differentials correctly creates a new copy with the same
# differential
cr = CartesianRepresentation([1, 2, 3] * u.kpc)
diff = CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
cr2 = cr.with_differentials(diff)
assert cr.differentials != cr2.differentials
assert cr2.differentials["s"] is diff
# make sure it works even if a differential is present already
diff2 = CartesianDifferential([0.1, 0.2, 0.3] * u.m / u.s)
cr3 = CartesianRepresentation([1, 2, 3] * u.kpc, differentials=diff)
cr4 = cr3.with_differentials(diff2)
assert cr4.differentials["s"] != cr3.differentials["s"]
assert cr4.differentials["s"] == diff2
# also ensure a *scalar* differential works
cr5 = cr.with_differentials(diff)
assert len(cr5.differentials) == 1
assert cr5.differentials["s"] == diff
# make sure we don't update the original representation's dict
d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km / u.s)
d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km / u.s**2)
r1 = CartesianRepresentation(
*np.random.random((3, 5)), unit=u.pc, differentials=d1
)
r2 = r1.with_differentials(d2)
assert r1.differentials["s"] is r2.differentials["s"]
assert "s2" not in r1.differentials
assert "s2" in r2.differentials
def test_repr_with_differentials():
diff = CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
cr = CartesianRepresentation([1, 2, 3] * u.kpc, differentials=diff)
assert "has differentials w.r.t.: 's'" in repr(cr)
def test_to_cartesian():
"""
Test that to_cartesian drops the differential.
"""
sd = SphericalDifferential(d_lat=1 * u.deg, d_lon=2 * u.deg, d_distance=10 * u.m)
sr = SphericalRepresentation(
lat=1 * u.deg, lon=2 * u.deg, distance=10 * u.m, differentials=sd
)
cart = sr.to_cartesian()
assert cart.get_name() == "cartesian"
assert not cart.differentials
@pytest.fixture
def unitphysics():
"""
This fixture provides a unit-norm analogue of PhysicsSphericalRepresentation,
temporarily installed as its _unit_representation for the test below.
"""
had_unit = False
if hasattr(PhysicsSphericalRepresentation, "_unit_representation"):
orig = PhysicsSphericalRepresentation._unit_representation
had_unit = True
class UnitPhysicsSphericalRepresentation(BaseRepresentation):
attr_classes = {"phi": Angle, "theta": Angle}
def __init__(self, *args, copy=True, **kwargs):
super().__init__(*args, copy=copy, **kwargs)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.0 * u.deg) or np.any(self._theta > 180.0 * u.deg):
raise ValueError(
"Inclination angle(s) must be within 0 deg <= angle <= 180 deg, "
f"got {self._theta.to(u.degree)}"
)
@property
def phi(self):
return self._phi
@property
def theta(self):
return self._theta
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
"phi": CartesianRepresentation(-sinphi, cosphi, 0.0, copy=False),
"theta": CartesianRepresentation(
costheta * cosphi, costheta * sinphi, -sintheta, copy=False
),
}
def scale_factors(self):
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"phi", sintheta, "theta", l}
def to_cartesian(self):
x = np.sin(self.theta) * np.cos(self.phi)
y = np.sin(self.theta) * np.sin(self.phi)
z = np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, copy=False)
def norm(self):
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False)
PhysicsSphericalRepresentation._unit_representation = (
UnitPhysicsSphericalRepresentation
)
yield UnitPhysicsSphericalRepresentation
if had_unit:
PhysicsSphericalRepresentation._unit_representation = orig
else:
del PhysicsSphericalRepresentation._unit_representation
# remove from the module-level representations, if present
REPRESENTATION_CLASSES.pop(UnitPhysicsSphericalRepresentation.get_name(), None)
def test_unitphysics(unitphysics):
obj = unitphysics(phi=0 * u.deg, theta=10 * u.deg)
objkw = unitphysics(phi=0 * u.deg, theta=10 * u.deg)
assert objkw.phi == obj.phi
assert objkw.theta == obj.theta
asphys = obj.represent_as(PhysicsSphericalRepresentation)
assert asphys.phi == obj.phi
assert_allclose(asphys.theta, obj.theta)
assert_allclose_quantity(asphys.r, 1 * u.dimensionless_unscaled)
assph = obj.represent_as(SphericalRepresentation)
assert assph.lon == obj.phi
assert_allclose_quantity(assph.lat, 80 * u.deg)
assert_allclose_quantity(assph.distance, 1 * u.dimensionless_unscaled)
with pytest.raises(TypeError, match="got multiple values"):
unitphysics(1 * u.deg, 2 * u.deg, theta=10)
with pytest.raises(TypeError, match="unexpected keyword.*parrot"):
unitphysics(1 * u.deg, 2 * u.deg, parrot=10)
def test_distance_warning(recwarn):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1 * u.kpc)
with pytest.raises(ValueError) as excinfo:
SphericalRepresentation(1 * u.deg, 2 * u.deg, -1 * u.kpc)
assert "Distance must be >= 0" in str(excinfo.value)
# the second check is needed because the "originating" ValueError carries the
# message above, while the representation's own error adds the message below
assert "you must explicitly pass" in str(excinfo.value)
def test_dtype_preservation_in_indexing():
# Regression test for issue #8614 (fixed in #8876)
xyz = np.array([[1, 0, 0], [0.9, 0.1, 0]], dtype="f4")
cr = CartesianRepresentation(xyz, xyz_axis=-1, unit="km")
assert cr.xyz.dtype == xyz.dtype
cr0 = cr[0]
# This used to fail.
assert cr0.xyz.dtype == xyz.dtype
class TestInfo:
def setup_class(cls):
cls.rep = SphericalRepresentation([0, 1] * u.deg, [2, 3] * u.deg, 10 * u.pc)
cls.diff = SphericalDifferential(
[10, 20] * u.mas / u.yr, [30, 40] * u.mas / u.yr, [50, 60] * u.km / u.s
)
cls.rep_w_diff = SphericalRepresentation(cls.rep, differentials=cls.diff)
def test_info_unit(self):
assert self.rep.info.unit == "deg, deg, pc"
assert self.diff.info.unit == "mas / yr, mas / yr, km / s"
assert self.rep_w_diff.info.unit == "deg, deg, pc"
@pytest.mark.parametrize("item", ["rep", "diff", "rep_w_diff"])
def test_roundtrip(self, item):
rep_or_diff = getattr(self, item)
as_dict = rep_or_diff.info._represent_as_dict()
new = rep_or_diff.__class__.info._construct_from_dict(as_dict)
assert np.all(representation_equal(new, rep_or_diff))
@pytest.mark.parametrize(
"cls",
[
SphericalDifferential,
SphericalCosLatDifferential,
CylindricalDifferential,
PhysicsSphericalDifferential,
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
],
)
def test_differential_norm_noncartesian(cls):
# The norm of a non-Cartesian differential without specifying `base` should error
rep = cls(0, 0, 0)
with pytest.raises(ValueError, match=r"`base` must be provided .* " + cls.__name__):
rep.norm()
def test_differential_norm_radial():
# Unlike most non-Cartesian differentials, the norm of a radial differential does not require `base`
rep = RadialDifferential(1 * u.km / u.s)
assert_allclose_quantity(rep.norm(), 1 * u.km / u.s)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz.
"""
import os
import warnings
from importlib import metadata
import erfa
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
CIRS,
GCRS,
HCRS,
ICRS,
ITRS,
TEME,
TETE,
AltAz,
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
HADec,
HeliocentricMeanEcliptic,
PrecessedGeocentric,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
get_sun,
solar_system_ephemeris,
)
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.coordinates.builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
get_location_gcrs,
tete_to_itrs_mat,
)
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates.solar_system import (
_apparent_position_in_true_coordinates,
get_body,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
CI = os.environ.get("CI", False) == "true"
def test_icrs_cirs():
"""
Check a few cases of ICRS<->CIRS for consistency.
Also includes the CIRS<->CIRS transforms at different times, as those go
through ICRS
"""
usph = golden_spiral_grid(200)
dist = np.linspace(0.0, 1, len(usph)) * u.pc
inod = ICRS(usph)
iwd = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
cframe1 = CIRS()
cirsnod = inod.transform_to(cframe1) # uses the default time
# first do a round-tripping test
inod2 = cirsnod.transform_to(ICRS())
assert_allclose(inod.ra, inod2.ra)
assert_allclose(inod.dec, inod2.dec)
# now check that a different time yields different answers
cframe2 = CIRS(obstime=Time("J2005"))
cirsnod2 = inod.transform_to(cframe2)
assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)
# parallax effects should be included, so with and w/o distance should be different
cirswd = iwd.transform_to(cframe1)
assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
# and the distance should transform at least somehow
assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8)
# now check that the cirs self-transform works as expected
cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op
assert_allclose(cirsnod.ra, cirsnod3.ra)
assert_allclose(cirsnod.dec, cirsnod3.dec)
cirsnod4 = cirsnod.transform_to(cframe2) # should be different
assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)
cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same
assert_allclose(cirsnod.ra, cirsnod5.ra)
assert_allclose(cirsnod.dec, cirsnod5.dec)
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
icrs_coords = [ICRS(usph), ICRS(usph.lon, usph.lat, distance=dist)]
gcrs_frames = [GCRS(), GCRS(obstime=Time("J2005"))]
@pytest.mark.parametrize("icoo", icrs_coords)
def test_icrs_gcrs(icoo):
"""
Check ICRS<->GCRS for consistency
"""
gcrscoo = icoo.transform_to(gcrs_frames[0]) # uses the default time
# first do a round-tripping test
icoo2 = gcrscoo.transform_to(ICRS())
assert_allclose(icoo.distance, icoo2.distance)
assert_allclose(icoo.ra, icoo2.ra)
assert_allclose(icoo.dec, icoo2.dec)
assert isinstance(icoo2.data, icoo.data.__class__)
# now check that a different time yields different answers
gcrscoo2 = icoo.transform_to(gcrs_frames[1])
assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10 * u.deg)
# now check that the gcrs self-transform works as expected
gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0]) # should be a no-op
assert_allclose(gcrscoo.ra, gcrscoo3.ra)
assert_allclose(gcrscoo.dec, gcrscoo3.dec)
gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different
assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10 * u.deg)
gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same
assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10 * u.deg)
# also make sure that a GCRS with a different geoloc/geovel gets a different answer
# roughly a moon-like frame
gframe3 = GCRS(obsgeoloc=[385000.0, 0, 0] * u.km, obsgeovel=[1, 0, 0] * u.km / u.s)
gcrscoo6 = icoo.transform_to(gframe3) # should be different
assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10 * u.deg)
icooviag3 = gcrscoo6.transform_to(ICRS()) # and now back to the original
assert_allclose(icoo.ra, icooviag3.ra)
assert_allclose(icoo.dec, icooviag3.dec)
@pytest.mark.parametrize("gframe", gcrs_frames)
def test_icrs_gcrs_dist_diff(gframe):
"""
Check that with and without distance give different ICRS<->GCRS answers
"""
gcrsnod = icrs_coords[0].transform_to(gframe)
gcrswd = icrs_coords[1].transform_to(gframe)
# parallax effects should be included, so with and w/o distance should be different
assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10 * u.deg)
# and the distance should transform at least somehow
assert not allclose(
gcrswd.distance, icrs_coords[1].distance, rtol=1e-8, atol=1e-10 * u.pc
)
def test_cirs_to_altaz():
"""
Check the basic CIRS<->AltAz transforms. More thorough checks implicitly
happen in `test_iau_fullstack`
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
cirs = CIRS(usph, obstime="J2000")
crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist)
cirscart = CIRS(
crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
altazframe = AltAz(location=loc, obstime=Time("J2005"))
cirs2 = cirs.transform_to(altazframe).transform_to(cirs)
cirs3 = cirscart.transform_to(altazframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_cirs_to_hadec():
"""
Check the basic CIRS<->HADec transforms.
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
cirs = CIRS(usph, obstime="J2000")
crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist)
cirscart = CIRS(
crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
hadecframe = HADec(location=loc, obstime=Time("J2005"))
cirs2 = cirs.transform_to(hadecframe).transform_to(cirs)
cirs3 = cirscart.transform_to(hadecframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_itrs_topo_to_altaz_with_refraction():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
dist = np.linspace(1.0, 1000.0, len(usph)) * u.au
icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
altaz_frame1 = AltAz(obstime="J2000", location=loc)
altaz_frame2 = AltAz(
obstime="J2000", location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5
)
cirs_frame = CIRS(obstime="J2000", location=loc)
itrs_frame = ITRS(location=loc)
# Normal route
# No Refraction
altaz1 = icrs.transform_to(altaz_frame1)
# Refraction added
altaz2 = icrs.transform_to(altaz_frame2)
# Refraction removed
cirs = altaz2.transform_to(cirs_frame)
altaz3 = cirs.transform_to(altaz_frame1)
# Through ITRS
# No Refraction
itrs = icrs.transform_to(itrs_frame)
altaz11 = itrs.transform_to(altaz_frame1)
assert_allclose(altaz11.az - altaz1.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz11.alt - altaz1.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz11.distance - altaz1.distance, 0 * u.cm, atol=10.0 * u.cm)
# Round trip
itrs11 = altaz11.transform_to(itrs_frame)
assert_allclose(itrs11.x, itrs.x)
assert_allclose(itrs11.y, itrs.y)
assert_allclose(itrs11.z, itrs.z)
# Refraction added
altaz22 = itrs.transform_to(altaz_frame2)
assert_allclose(altaz22.az - altaz2.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz22.alt - altaz2.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz22.distance - altaz2.distance, 0 * u.cm, atol=10.0 * u.cm)
# Refraction removed
itrs = altaz22.transform_to(itrs_frame)
altaz33 = itrs.transform_to(altaz_frame1)
assert_allclose(altaz33.az - altaz3.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz33.alt - altaz3.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz33.distance - altaz3.distance, 0 * u.cm, atol=10.0 * u.cm)
def test_itrs_topo_to_hadec_with_refraction():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
dist = np.linspace(1.0, 1000.0, len(usph)) * u.au
icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
hadec_frame1 = HADec(obstime="J2000", location=loc)
hadec_frame2 = HADec(
obstime="J2000", location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5
)
cirs_frame = CIRS(obstime="J2000", location=loc)
itrs_frame = ITRS(location=loc)
# Normal route
# No Refraction
hadec1 = icrs.transform_to(hadec_frame1)
# Refraction added
hadec2 = icrs.transform_to(hadec_frame2)
# Refraction removed
cirs = hadec2.transform_to(cirs_frame)
hadec3 = cirs.transform_to(hadec_frame1)
# Through ITRS
# No Refraction
itrs = icrs.transform_to(itrs_frame)
hadec11 = itrs.transform_to(hadec_frame1)
assert_allclose(hadec11.ha - hadec1.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec11.dec - hadec1.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec11.distance - hadec1.distance, 0 * u.cm, atol=10.0 * u.cm)
# Round trip
itrs11 = hadec11.transform_to(itrs_frame)
assert_allclose(itrs11.x, itrs.x)
assert_allclose(itrs11.y, itrs.y)
assert_allclose(itrs11.z, itrs.z)
# Refraction added
hadec22 = itrs.transform_to(hadec_frame2)
assert_allclose(hadec22.ha - hadec2.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec22.dec - hadec2.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec22.distance - hadec2.distance, 0 * u.cm, atol=10.0 * u.cm)
# Refraction removed
itrs = hadec22.transform_to(itrs_frame)
hadec33 = itrs.transform_to(hadec_frame1)
assert_allclose(hadec33.ha - hadec3.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec33.dec - hadec3.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec33.distance - hadec3.distance, 0 * u.cm, atol=10.0 * u.cm)
def test_gcrs_itrs():
"""
Check basic GCRS<->ITRS transforms for round-tripping.
"""
usph = golden_spiral_grid(200)
gcrs = GCRS(usph, obstime="J2000")
gcrs6 = GCRS(usph, obstime="J2006")
gcrs2 = gcrs.transform_to(ITRS()).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(ITRS()).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
# these should be different:
assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8)
assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8)
# also try with the cartesian representation
gcrsc = gcrs.realize_frame(gcrs.data)
gcrsc.representation_type = CartesianRepresentation
gcrsc2 = gcrsc.transform_to(ITRS()).transform_to(gcrsc)
assert_allclose(gcrsc.spherical.lon, gcrsc2.ra)
assert_allclose(gcrsc.spherical.lat, gcrsc2.dec)
def test_cirs_itrs():
"""
Check basic CIRS<->ITRS geocentric transforms for round-tripping.
"""
usph = golden_spiral_grid(200)
cirs = CIRS(usph, obstime="J2000")
cirs6 = CIRS(usph, obstime="J2006")
cirs2 = cirs.transform_to(ITRS()).transform_to(cirs)
cirs6_2 = cirs6.transform_to(ITRS()).transform_to(cirs) # different obstime
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_cirs_itrs_topo():
"""
Check basic CIRS<->ITRS topocentric transforms for round-tripping.
"""
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
cirs = CIRS(usph, obstime="J2000", location=loc)
cirs6 = CIRS(usph, obstime="J2006", location=loc)
cirs2 = cirs.transform_to(ITRS(location=loc)).transform_to(cirs)
# different obstime
cirs6_2 = cirs6.transform_to(ITRS(location=loc)).transform_to(cirs)
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_gcrs_cirs():
"""
Check GCRS<->CIRS transforms for round-tripping. More complicated than the
above two because it's multi-hop
"""
usph = golden_spiral_grid(200)
gcrs = GCRS(usph, obstime="J2000")
gcrs6 = GCRS(usph, obstime="J2006")
gcrs2 = gcrs.transform_to(CIRS()).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(CIRS()).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
# these should be different:
assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8)
assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8)
# now try explicit intermediate pathways and ensure they're all consistent
gcrs3 = (
gcrs.transform_to(ITRS())
.transform_to(CIRS())
.transform_to(ITRS())
.transform_to(gcrs)
)
assert_allclose(gcrs.ra, gcrs3.ra)
assert_allclose(gcrs.dec, gcrs3.dec)
gcrs4 = (
gcrs.transform_to(ICRS())
.transform_to(CIRS())
.transform_to(ICRS())
.transform_to(gcrs)
)
assert_allclose(gcrs.ra, gcrs4.ra)
assert_allclose(gcrs.dec, gcrs4.dec)
def test_gcrs_altaz():
"""
Check GCRS<->AltAz transforms for round-tripping. Has multiple paths
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(128)
gcrs = GCRS(usph, obstime="J2000")[None] # broadcast with times below
# check array times sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format="jd")[:, None]
loc = EarthLocation(lon=10 * u.deg, lat=80.0 * u.deg)
aaframe = AltAz(obstime=times, location=loc)
aa1 = gcrs.transform_to(aaframe)
aa2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(aaframe)
aa3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(aaframe)
# make sure they're all consistent
assert_allclose(aa1.alt, aa2.alt)
assert_allclose(aa1.az, aa2.az)
assert_allclose(aa1.alt, aa3.alt)
assert_allclose(aa1.az, aa3.az)
def test_gcrs_hadec():
"""
Check GCRS<->HADec transforms for round-tripping. Has multiple paths
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(128)
gcrs = GCRS(usph, obstime="J2000") # broadcast with times below
# use an array of times to check that N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format="jd")[:, None]
loc = EarthLocation(lon=10 * u.deg, lat=80.0 * u.deg)
hdframe = HADec(obstime=times, location=loc)
hd1 = gcrs.transform_to(hdframe)
hd2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(hdframe)
hd3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(hdframe)
# make sure they're all consistent
assert_allclose(hd1.dec, hd2.dec)
assert_allclose(hd1.ha, hd2.ha)
assert_allclose(hd1.dec, hd3.dec)
assert_allclose(hd1.ha, hd3.ha)
def test_precessed_geocentric():
assert PrecessedGeocentric().equinox.jd == Time("J2000").jd
gcrs_coo = GCRS(180 * u.deg, 2 * u.deg, distance=10000 * u.km)
pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric())
assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10 * u.marcsec
assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10 * u.marcsec
assert_allclose(gcrs_coo.distance, pgeo_coo.distance)
gcrs_roundtrip = pgeo_coo.transform_to(GCRS())
assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance)
pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox="B1850"))
assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5 * u.deg
assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5 * u.deg
assert_allclose(gcrs_coo.distance, pgeo_coo2.distance)
gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS())
assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance)
def test_precessed_geocentric_different_obstime():
# Create two PrecessedGeocentric frames with different obstime
precessedgeo1 = PrecessedGeocentric(obstime="2021-09-07")
precessedgeo2 = PrecessedGeocentric(obstime="2021-06-07")
# GCRS->PrecessedGeocentric should give different results for the two frames
gcrs_coord = GCRS(10 * u.deg, 20 * u.deg, 3 * u.AU, obstime=precessedgeo1.obstime)
pg_coord1 = gcrs_coord.transform_to(precessedgeo1)
pg_coord2 = gcrs_coord.transform_to(precessedgeo2)
assert not pg_coord1.is_equivalent_frame(pg_coord2)
assert not allclose(pg_coord1.cartesian.xyz, pg_coord2.cartesian.xyz)
# Looping back to GCRS should return the original coordinate
loopback1 = pg_coord1.transform_to(gcrs_coord)
loopback2 = pg_coord2.transform_to(gcrs_coord)
assert loopback1.is_equivalent_frame(gcrs_coord)
assert loopback2.is_equivalent_frame(gcrs_coord)
assert_allclose(loopback1.cartesian.xyz, gcrs_coord.cartesian.xyz)
assert_allclose(loopback2.cartesian.xyz, gcrs_coord.cartesian.xyz)
# shared by parametrized tests below. Some use the whole AltAz, others use just obstime
totest_frames = [
# J2000 is often a default so this might work when others don't
AltAz(location=EarthLocation(-90 * u.deg, 65 * u.deg), obstime=Time("J2000")),
AltAz(location=EarthLocation(120 * u.deg, -35 * u.deg), obstime=Time("J2000")),
AltAz(
location=EarthLocation(-90 * u.deg, 65 * u.deg),
obstime=Time("2014-01-01 00:00:00"),
),
AltAz(
location=EarthLocation(-90 * u.deg, 65 * u.deg),
obstime=Time("2014-08-01 08:00:00"),
),
AltAz(
location=EarthLocation(120 * u.deg, -35 * u.deg),
obstime=Time("2014-01-01 00:00:00"),
),
]
MOONDIST = 385000 * u.km # approximate semi-major axis of the Moon's orbit
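# the same distance placed along the (1, 1, 1) diagonal: each component is MOONDIST / sqrt(3)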
MOONDIST_CART = CartesianRepresentation(
3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST
)
# roughly Earth's orbital eccentricity, with an added tolerance
EARTHECC = 0.017 + 0.005
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_sunish(testframe):
"""
Sanity-check that the sun is at a reasonable distance from any altaz
"""
sun = get_sun(testframe.obstime)
assert sun.frame.name == "gcrs"
# the .to(u.au) is not necessary; it just makes the asserts on failure more readable
assert (EARTHECC - 1) * u.au < sun.distance.to(u.au) < (EARTHECC + 1) * u.au
sunaa = sun.transform_to(testframe)
assert (EARTHECC - 1) * u.au < sunaa.distance.to(u.au) < (EARTHECC + 1) * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a GCRS->AltAz transformation
"""
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
# now check that the distance change is similar to earth radius
assert 1000 * u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000 * u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
# also should add checks that the alt/az are different for different earth locations
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_bothroutes(testframe):
"""
Repeat of both the moonish and sunish tests above to make sure the two
routes through the coordinate graph are consistent with each other
"""
sun = get_sun(testframe.obstime)
sunaa_viaicrs = sun.transform_to(ICRS()).transform_to(testframe)
sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(
testframe
)
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa_viaicrs = moon.transform_to(ICRS()).transform_to(testframe)
moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(
testframe
)
assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz)
assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a CIRS<->AltAz transformation
"""
moon = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
assert 1000 * u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000 * u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_altaz_nodist(testframe):
"""
Check that a UnitSphericalRepresentation coordinate round-trips for the
CIRS<->AltAz transformation.
"""
coo0 = CIRS(
UnitSphericalRepresentation(10 * u.deg, 20 * u.deg), obstime=testframe.obstime
)
# check that it round-trips
coo1 = coo0.transform_to(testframe).transform_to(coo0)
assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from CIRS
"""
moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS())
assert 0.97 * u.au < moonicrs.distance < 1.03 * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from GCRS
"""
moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS())
assert 0.97 * u.au < moonicrs.distance < 1.03 * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_icrs_gcrscirs_sunish(testframe):
"""
check that the ICRS barycenter goes to about the right distance from various
~geocentric frames (other than testframe)
"""
# slight offset to avoid divide-by-zero errors
icrs = ICRS(0 * u.deg, 0 * u.deg, distance=10 * u.km)
gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime))
assert (EARTHECC - 1) * u.au < gcrs.distance.to(u.au) < (EARTHECC + 1) * u.au
cirs = icrs.transform_to(CIRS(obstime=testframe.obstime))
assert (EARTHECC - 1) * u.au < cirs.distance.to(u.au) < (EARTHECC + 1) * u.au
itrs = icrs.transform_to(ITRS(obstime=testframe.obstime))
assert (
(EARTHECC - 1) * u.au < itrs.spherical.distance.to(u.au) < (EARTHECC + 1) * u.au
)
@pytest.mark.parametrize("testframe", totest_frames)
def test_icrs_altaz_moonish(testframe):
"""
Check that something expressed in *ICRS* as being moon-like goes to the
right AltAz distance
"""
# we use epv00 instead of get_sun because get_sun includes aberration
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(testframe.obstime, "tdb"))
earth_icrs_xyz = earth_pv_bary[0] * u.au
moonoffset = [0, 0, MOONDIST.value] * MOONDIST.unit
moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset))
moonaa = moonish_icrs.transform_to(testframe)
# now check that the distance change is similar to earth radius
assert 1000 * u.km < np.abs(moonaa.distance - MOONDIST).to(u.km) < 7000 * u.km
def test_gcrs_self_transform_closeby():
"""
Tests GCRS self transform for objects which are nearby and thus
have reasonable parallax.
Moon positions were originally created using JPL DE432s ephemeris.
The two lunar positions (one geocentric, one at a defined location)
are created via a transformation from ICRS to two different GCRS frames.
We test that the GCRS-GCRS self transform can correctly map one GCRS
frame onto the other.
"""
t = Time("2014-12-25T07:00")
moon_geocentric = SkyCoord(
GCRS(
318.10579159 * u.deg,
-11.65281165 * u.deg,
365042.64880308 * u.km,
obstime=t,
)
)
# this is the location of the Moon as seen from La Palma
obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216] * u.m
obsgeovel = [4.59798494, -407.84677071, 0.0] * u.m / u.s
moon_lapalma = SkyCoord(
GCRS(
318.7048445 * u.deg,
-11.98761996 * u.deg,
369722.8231031 * u.km,
obstime=t,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel,
)
)
transformed = moon_geocentric.transform_to(moon_lapalma.frame)
delta = transformed.separation_3d(moon_lapalma)
assert_allclose(delta, 0.0 * u.m, atol=1 * u.m)
def test_teme_itrf():
"""
Test case transform from TEME to ITRF.
Test case derives from the example in Appendix C of Vallado, Crawford, Hujsak & Kelso (2006).
See https://celestrak.com/publications/AIAA/2006-6753/AIAA-2006-6753-Rev2.pdf
"""
v_itrf = CartesianDifferential(
-3.225636520, -2.872451450, 5.531924446, unit=u.km / u.s
)
p_itrf = CartesianRepresentation(
-1033.479383,
7901.2952740,
6380.35659580,
unit=u.km,
differentials={"s": v_itrf},
)
t = Time("2004-04-06T07:51:28.386")
teme = ITRS(p_itrf, obstime=t).transform_to(TEME(obstime=t))
v_teme = CartesianDifferential(
-4.746131487, 0.785818041, 5.531931288, unit=u.km / u.s
)
p_teme = CartesianRepresentation(
5094.18016210,
6127.64465050,
6380.34453270,
unit=u.km,
differentials={"s": v_teme},
)
assert_allclose(
teme.cartesian.without_differentials().xyz,
p_teme.without_differentials().xyz,
atol=30 * u.cm,
)
assert_allclose(
teme.cartesian.differentials["s"].d_xyz,
p_teme.differentials["s"].d_xyz,
atol=1.0 * u.cm / u.s,
)
# test round trip
itrf = teme.transform_to(ITRS(obstime=t))
assert_allclose(
itrf.cartesian.without_differentials().xyz,
p_itrf.without_differentials().xyz,
atol=100 * u.cm,
)
assert_allclose(
itrf.cartesian.differentials["s"].d_xyz,
p_itrf.differentials["s"].d_xyz,
atol=1 * u.cm / u.s,
)
def test_precessedgeocentric_loopback():
from_coo = PrecessedGeocentric(
1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-01-01", equinox="2001-01-01"
)
# Change just the obstime
to_frame = PrecessedGeocentric(obstime="2001-06-30", equinox="2001-01-01")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10)
assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10)
assert not allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10)
assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10)
assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
# Change just the equinox
to_frame = PrecessedGeocentric(obstime="2001-01-01", equinox="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the direction but not the distance
assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10)
assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10)
assert allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10)
assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10)
assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
def test_teme_loopback():
from_coo = TEME(1 * u.AU, 2 * u.AU, 3 * u.AU, obstime="2001-01-01")
to_frame = TEME(obstime="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
@pytest.mark.remote_data
def test_earth_orientation_table(monkeypatch):
"""Check that we can set the IERS table used as Earth Reference.
Use the here and now to be sure we get a difference.
"""
monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
t = Time.now()
location = EarthLocation(lat=0 * u.deg, lon=0 * u.deg)
altaz = AltAz(location=location, obstime=t)
sc = SkyCoord(1 * u.deg, 2 * u.deg)
# Default: uses IERS_Auto, which will give a prediction.
# Note: tests run with warnings turned into errors, so it is
# meaningful if this passes.
if CI:
with warnings.catch_warnings():
# Server occasionally blocks IERS download in CI.
warnings.filterwarnings("ignore", message=r".*using local IERS-B.*")
# This also captures unclosed socket warning that is ignored in setup.cfg
warnings.filterwarnings("ignore", message=r".*unclosed.*")
altaz_auto = sc.transform_to(altaz)
else:
altaz_auto = sc.transform_to(altaz) # No warnings
with iers.earth_orientation_table.set(iers.IERS_B.open()):
with pytest.warns(AstropyWarning, match="after IERS data"):
altaz_b = sc.transform_to(altaz)
sep_b_auto = altaz_b.separation(altaz_auto)
assert_allclose(sep_b_auto, 0.0 * u.deg, atol=1 * u.arcsec)
assert sep_b_auto > 10 * u.microarcsecond
# Check we returned to regular IERS system.
altaz_auto2 = sc.transform_to(altaz)
assert altaz_auto2.separation(altaz_auto) == 0.0
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_ephemerides():
"""
We test that using different ephemerides gives very similar results
for transformations
"""
t = Time("2014-12-25T07:00")
moon = SkyCoord(
GCRS(
318.10579159 * u.deg,
-11.65281165 * u.deg,
365042.64880308 * u.km,
obstime=t,
)
)
icrs_frame = ICRS()
hcrs_frame = HCRS(obstime=t)
ecl_frame = HeliocentricMeanEcliptic(equinox=t)
cirs_frame = CIRS(obstime=t)
moon_icrs_builtin = moon.transform_to(icrs_frame)
moon_hcrs_builtin = moon.transform_to(hcrs_frame)
moon_helioecl_builtin = moon.transform_to(ecl_frame)
moon_cirs_builtin = moon.transform_to(cirs_frame)
with solar_system_ephemeris.set("jpl"):
moon_icrs_jpl = moon.transform_to(icrs_frame)
moon_hcrs_jpl = moon.transform_to(hcrs_frame)
moon_helioecl_jpl = moon.transform_to(ecl_frame)
moon_cirs_jpl = moon.transform_to(cirs_frame)
# most transformations should differ by an amount which is
# non-zero but of order milliarcsecs
sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl)
sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl)
sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl)
sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl)
assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0 * u.deg, atol=10 * u.mas)
assert all(
sep > 10 * u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl)
)
# CIRS should be the same
assert_allclose(sep_cirs, 0.0 * u.deg, atol=1 * u.microarcsecond)
def test_tete_transforms():
"""
We test the TETE transforms for proper behaviour here.
The TETE transforms are tested for accuracy against JPL Horizons in
test_solar_system.py. Here we are looking to check for consistency and
errors in the self transform.
"""
loc = EarthLocation.from_geodetic("-22°57'35.1", "-67°47'14.1", 5186 * u.m)
time = Time("2020-04-06T00:00")
p, v = loc.get_gcrs_posvel(time)
gcrs_frame = GCRS(obstime=time, obsgeoloc=p, obsgeovel=v)
moon = SkyCoord(
169.24113968 * u.deg,
10.86086666 * u.deg,
358549.25381755 * u.km,
frame=gcrs_frame,
)
tete_frame = TETE(obstime=time, location=loc)
# need to set obsgeoloc/vel explicitly, or the SkyCoord behaviour overwrites them
tete_geo = TETE(obstime=time, location=EarthLocation(*([0, 0, 0] * u.km)))
# test self-transform by comparing to GCRS-TETE-ITRS-TETE route
tete_coo1 = moon.transform_to(tete_frame)
tete_coo2 = moon.transform_to(tete_geo)
assert_allclose(tete_coo1.separation_3d(tete_coo2), 0 * u.mm, atol=1 * u.mm)
# test TETE-ITRS transform by comparing GCRS-CIRS-ITRS to GCRS-TETE-ITRS
itrs1 = moon.transform_to(CIRS()).transform_to(ITRS())
itrs2 = moon.transform_to(TETE()).transform_to(ITRS())
assert_allclose(itrs1.separation_3d(itrs2), 0 * u.mm, atol=1 * u.mm)
# test round trip GCRS->TETE->GCRS
new_moon = moon.transform_to(TETE()).transform_to(moon)
assert_allclose(new_moon.separation_3d(moon), 0 * u.mm, atol=1 * u.mm)
# test round trip via ITRS
tete_rt = tete_coo1.transform_to(ITRS(obstime=time)).transform_to(tete_coo1)
assert_allclose(tete_rt.separation_3d(tete_coo1), 0 * u.mm, atol=1 * u.mm)
# ensure deprecated routine remains consistent
# make sure test raises warning!
with pytest.warns(AstropyDeprecationWarning, match="The use of"):
tete_alt = _apparent_position_in_true_coordinates(moon)
assert_allclose(tete_coo1.separation_3d(tete_alt), 0 * u.mm, atol=100 * u.mm)
def test_straight_overhead():
"""
With a precise CIRS<->Observed transformation this should give Alt=90 exactly
If the CIRS self-transform is broken it won't be, due to improper treatment of aberration
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=10.0 * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=0.0 * u.km)
# An object that appears straight overhead - FOR A GEOCENTRIC OBSERVER.
# Note, this won't be overhead for a topocentric observer because of
# aberration.
cirs_geo = obj.get_itrs(t).transform_to(CIRS(obstime=t))
# now get the Geocentric CIRS position of observatory
obsrepr = home.get_itrs(t).transform_to(CIRS(obstime=t)).cartesian
# topocentric CIRS position of a straight overhead object
cirs_repr = cirs_geo.cartesian - obsrepr
# create a CIRS object that appears straight overhead for a TOPOCENTRIC OBSERVER
topocentric_cirs_frame = CIRS(obstime=t, location=home)
cirs_topo = topocentric_cirs_frame.realize_frame(cirs_repr)
# Check AltAz (though Azimuth can be anything so is not tested).
aa = cirs_topo.transform_to(AltAz(obstime=t, location=home))
assert_allclose(aa.alt, 90 * u.deg, atol=1 * u.uas, rtol=0)
# Check HADec.
hd = cirs_topo.transform_to(HADec(obstime=t, location=home))
assert_allclose(hd.ha, 0 * u.hourangle, atol=1 * u.uas, rtol=0)
assert_allclose(hd.dec, 52 * u.deg, atol=1 * u.uas, rtol=0)
def test_itrs_straight_overhead():
"""
With a precise ITRS<->Observed transformation this should give Alt=90 exactly
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=10.0 * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=0.0 * u.km)
# An object that appears straight overhead - FOR A GEOCENTRIC OBSERVER.
itrs_geo = obj.get_itrs(t).cartesian
# now get the Geocentric ITRS position of observatory
obsrepr = home.get_itrs(t).cartesian
# topocentric ITRS position of a straight overhead object
itrs_repr = itrs_geo - obsrepr
# create a ITRS object that appears straight overhead for a TOPOCENTRIC OBSERVER
itrs_topo = ITRS(itrs_repr, obstime=t, location=home)
# Check AltAz (though Azimuth can be anything so is not tested).
aa = itrs_topo.transform_to(AltAz(obstime=t, location=home))
assert_allclose(aa.alt, 90 * u.deg, atol=1 * u.uas, rtol=0)
# Check HADec.
hd = itrs_topo.transform_to(HADec(obstime=t, location=home))
assert_allclose(hd.ha, 0 * u.hourangle, atol=1 * u.uas, rtol=0)
assert_allclose(hd.dec, 52 * u.deg, atol=1 * u.uas, rtol=0)
def jplephem_ge(minversion):
"""Check if jplephem is installed and has version >= minversion."""
# This is a separate routine since somehow with pyinstaller the stanza
# not HAS_JPLEPHEM or metadata.version('jplephem') < '2.15'
# leads to a module not found error.
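# note: this is a lexicographic string comparison of version strings, which is
# adequate only while the field widths match (e.g. "2.15" vs "2.16")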
try:
return HAS_JPLEPHEM and metadata.version("jplephem") >= minversion
except Exception:
return False
@pytest.mark.remote_data
@pytest.mark.skipif(not jplephem_ge("2.15"), reason="requires jplephem >= 2.15")
def test_aa_hd_high_precision():
"""These tests are provided by @mkbrewer - see issue #10356.
The code that produces them agrees very well (<0.5 mas) with Skyfield once polar motion
is turned off, but Skyfield does not include polar motion, so a comparison to Skyfield
or JPL Horizons will be ~1" off.
The absence of polar motion within Skyfield and the disagreement between Skyfield and Horizons
make high precision comparisons to those codes difficult.
Updated 2020-11-29, after the comparison between codes became even better,
down to 100 nas.
NOTE: the agreement reflects consistency in approach between two codes,
not necessarily absolute precision. If this test starts failing, the
tolerance can and should be weakened *if* it is clear that the change is
due to an improvement (e.g., a new IAU precession model).
"""
lat = -22.959748 * u.deg
lon = -67.787260 * u.deg
elev = 5186 * u.m
loc = EarthLocation.from_geodetic(lon, lat, elev)
# Note: at this level of precision for the comparison, we have to include
# the location in the time, as it influences the transformation to TDB.
t = Time("2017-04-06T00:00:00.0", location=loc)
with solar_system_ephemeris.set("de430"):
moon = get_body("moon", t, loc)
moon_aa = moon.transform_to(AltAz(obstime=t, location=loc))
moon_hd = moon.transform_to(HADec(obstime=t, location=loc))
# Numbers from
# https://github.com/astropy/astropy/pull/11073#issuecomment-735486271
# updated in https://github.com/astropy/astropy/issues/11683
TARGET_AZ, TARGET_EL = 15.032673509956 * u.deg, 50.303110133923 * u.deg
TARGET_DISTANCE = 376252883.247239 * u.m
assert_allclose(moon_aa.az, TARGET_AZ, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_aa.alt, TARGET_EL, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_aa.distance, TARGET_DISTANCE, atol=0.1 * u.mm, rtol=0)
ha, dec = erfa.ae2hd(
moon_aa.az.to_value(u.radian),
moon_aa.alt.to_value(u.radian),
lat.to_value(u.radian),
)
ha = u.Quantity(ha, u.radian, copy=False)
dec = u.Quantity(dec, u.radian, copy=False)
assert_allclose(moon_hd.ha, ha, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_hd.dec, dec, atol=0.1 * u.uas, rtol=0)
def test_aa_high_precision_nodata():
"""
These tests are designed to ensure high precision alt-az transforms.
    They are a slight fudge since the target values come from astropy itself. They are
    generated with a version of the code that passes the tests above, but using the
    internal solar system ephemerides to avoid the use of remote data.
"""
# Last updated when switching to erfa 2.0.0 and its moon98 function.
TARGET_AZ, TARGET_EL = 15.03231495 * u.deg, 50.3027193 * u.deg
lat = -22.959748 * u.deg
lon = -67.787260 * u.deg
elev = 5186 * u.m
loc = EarthLocation.from_geodetic(lon, lat, elev)
t = Time("2017-04-06T00:00:00.0")
moon = get_body("moon", t, loc)
moon_aa = moon.transform_to(AltAz(obstime=t, location=loc))
assert_allclose(moon_aa.az - TARGET_AZ, 0 * u.mas, atol=0.5 * u.mas)
assert_allclose(moon_aa.alt - TARGET_EL, 0 * u.mas, atol=0.5 * u.mas)
class TestGetLocationGCRS:
# TETE and CIRS use get_location_gcrs to get obsgeoloc and obsgeovel
# with knowledge of some of the matrices. Check that this is consistent
# with a direct transformation.
def setup_class(cls):
cls.loc = loc = EarthLocation.from_geodetic(
np.linspace(0, 360, 6) * u.deg, np.linspace(-90, 90, 6) * u.deg, 100 * u.m
)
cls.obstime = obstime = Time(np.linspace(2000, 2010, 6), format="jyear")
# Get comparison via a full transformation. We do not use any methods
# of EarthLocation, since those depend on the fast transform.
loc_itrs = ITRS(loc.x, loc.y, loc.z, obstime=obstime)
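        # An Earth-fixed site has zero velocity in ITRS; attaching explicit
        # zero differentials below makes the ITRS->GCRS transform also return
        # the site's rotational velocity as the GCRS velocity.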
zeros = np.broadcast_to(0.0 * (u.km / u.s), (3,) + loc_itrs.shape, subok=True)
loc_itrs.data.differentials["s"] = CartesianDifferential(zeros)
loc_gcrs_cart = loc_itrs.transform_to(GCRS(obstime=obstime)).cartesian
cls.obsgeoloc = loc_gcrs_cart.without_differentials()
cls.obsgeovel = loc_gcrs_cart.differentials["s"].to_cartesian()
def check_obsgeo(self, obsgeoloc, obsgeovel):
assert_allclose(obsgeoloc.xyz, self.obsgeoloc.xyz, atol=0.1 * u.um, rtol=0.0)
assert_allclose(
obsgeovel.xyz, self.obsgeovel.xyz, atol=0.1 * u.mm / u.s, rtol=0.0
)
def test_get_gcrs_posvel(self):
# Really just a sanity check
self.check_obsgeo(*self.loc.get_gcrs_posvel(self.obstime))
def test_tete_quick(self):
# Following copied from intermediate_rotation_transforms.gcrs_to_tete
rbpn = erfa.pnm06a(*get_jd12(self.obstime, "tt"))
loc_gcrs_frame = get_location_gcrs(
self.loc, self.obstime, tete_to_itrs_mat(self.obstime, rbpn=rbpn), rbpn
)
self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
def test_cirs_quick(self):
cirs_frame = CIRS(location=self.loc, obstime=self.obstime)
# Following copied from intermediate_rotation_transforms.gcrs_to_cirs
pmat = gcrs_to_cirs_mat(cirs_frame.obstime)
loc_gcrs_frame = get_location_gcrs(
self.loc, self.obstime, cirs_to_itrs_mat(cirs_frame.obstime), pmat
)
self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
|
6ee0ebb449efa672bbffc295166c860e846d1d9e347cd5894fdb3d8f513428a7 | import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, Latitude, Longitude
from astropy.coordinates.sites import (
SiteRegistry,
get_builtin_sites,
get_downloaded_sites,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
def test_builtin_sites():
reg = get_builtin_sites()
greenwich = reg["greenwich"]
lon, lat, el = greenwich.to_geodetic()
assert_quantity_allclose(lon, Longitude("0:0:0", unit=u.deg), atol=10 * u.arcsec)
assert_quantity_allclose(lat, Latitude("51:28:40", unit=u.deg), atol=1 * u.arcsec)
assert_quantity_allclose(el, 46 * u.m, atol=1 * u.m)
names = reg.names
assert "greenwich" in names
assert "example_site" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use the 'names' attribute to see",
):
reg["nonexistent"]
@pytest.mark.remote_data(source="astropy")
def test_online_sites():
reg = get_downloaded_sites()
keck = reg["keck"]
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(
lon, -Longitude("155:28.7", unit=u.deg), atol=0.001 * u.deg
)
assert_quantity_allclose(lat, Latitude("19:49.7", unit=u.deg), atol=0.001 * u.deg)
assert_quantity_allclose(el, 4160 * u.m, atol=1 * u.m)
names = reg.names
assert "keck" in names
assert "ctio" in names
# The JSON file contains `name` and `aliases` for each site, and astropy
# should use names from both, but not empty strings [#12721].
assert "" not in names
assert "Royal Observatory Greenwich" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use the 'names' attribute to see",
):
reg["nonexistent"]
with pytest.raises(
KeyError,
match="Site 'kec' not in database. Use the 'names' attribute to see available",
):
reg["kec"]
@pytest.mark.remote_data(source="astropy")
# this will *try* the online registry, so we have to mark the test remote_data,
# even though it could fall back on the non-remote version
def test_EarthLocation_basic():
greenwichel = EarthLocation.of_site("greenwich")
lon, lat, el = greenwichel.to_geodetic()
assert_quantity_allclose(lon, Longitude("0:0:0", unit=u.deg), atol=10 * u.arcsec)
assert_quantity_allclose(lat, Latitude("51:28:40", unit=u.deg), atol=1 * u.arcsec)
assert_quantity_allclose(el, 46 * u.m, atol=1 * u.m)
names = EarthLocation.get_site_names()
assert "greenwich" in names
assert "example_site" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use EarthLocation.get_site_names",
):
EarthLocation.of_site("nonexistent")
def test_EarthLocation_state_offline():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_builtin=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_builtin=True)
assert oldreg is not newreg
@pytest.mark.remote_data(source="astropy")
def test_EarthLocation_state_online():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_download=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_download=True)
assert oldreg is not newreg
def test_registry():
reg = SiteRegistry()
assert len(reg.names) == 0
names = ["sitea", "site A"]
loc = EarthLocation.from_geodetic(lat=1 * u.deg, lon=2 * u.deg, height=3 * u.km)
reg.add_site(names, loc)
assert len(reg.names) == 2
loc1 = reg["SIteA"]
assert loc1 is loc
loc2 = reg["sIte a"]
assert loc2 is loc
def test_non_EarthLocation():
"""
A regression test for a typo bug pointed out at the bottom of
https://github.com/astropy/astropy/pull/4042
"""
class EarthLocation2(EarthLocation):
pass
    # This keeps us from needing to use remote_data.
# note that this does *not* mess up the registry for EarthLocation because
# registry is cached on a per-class basis
EarthLocation2._get_site_registry(force_builtin=True)
el2 = EarthLocation2.of_site("greenwich")
assert type(el2) is EarthLocation2
assert el2.info.name == "Royal Observatory Greenwich"
def check_builtin_matches_remote(download_url=True):
"""
This function checks that the builtin sites registry is consistent with the
remote registry (or a registry at some other location).
    Note that currently this is *not* run by the testing suite (because its name
    doesn't start with "test"); it is instead meant to be used as a check before
    merging changes in astropy-data. A usage sketch follows this function.
"""
builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
dl_registry = EarthLocation._get_site_registry(force_download=download_url)
in_dl = {}
matches = {}
for name in builtin_registry.names:
in_dl[name] = name in dl_registry
if in_dl[name]:
matches[name] = quantity_allclose(
builtin_registry[name].geocentric, dl_registry[name].geocentric
)
else:
matches[name] = False
if not all(matches.values()):
# this makes sure we actually see which don't match
print("In builtin registry but not in download:")
for name in in_dl:
if not in_dl[name]:
print(" ", name)
print("In both but not the same value:")
for name in matches:
if not matches[name] and in_dl[name]:
print(
" ",
name,
"builtin:",
builtin_registry[name],
"download:",
dl_registry[name],
)
assert False, (
"Builtin and download registry aren't consistent - failures printed to"
" stdout"
)
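# A minimal usage sketch for the check above (illustrative only, not run by
# the test suite; assumes network access to the astropy-data registry):
#
#     from astropy.coordinates.tests.test_sites import check_builtin_matches_remote
#     check_builtin_matches_remote()  # raises AssertionError on any mismatch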
def test_meta_present():
reg = get_builtin_sites()
greenwich = reg["greenwich"]
assert (
greenwich.info.meta["source"]
== "Ordnance Survey via http://gpsinformation.net/main/greenwich.htm and UNESCO"
)
|
3a51b5fe2b4a6f238fcf30fe1d37c0bfbb0e879ec681c152424dd1d36e6642cd | """Unit tests for the astropy.coordinates.angle_utilities module"""
import pytest
import astropy.units as u
from astropy.coordinates.angle_utilities import (
golden_spiral_grid,
uniform_spherical_random_surface,
uniform_spherical_random_volume,
)
from astropy.utils import NumpyRNGContext
def test_golden_spiral_grid_input():
usph = golden_spiral_grid(size=100)
assert len(usph) == 100
@pytest.mark.parametrize(
"func", [uniform_spherical_random_surface, uniform_spherical_random_volume]
)
def test_uniform_spherical_random_input(func):
with NumpyRNGContext(42):
sph = func(size=100)
assert len(sph) == 100
def test_uniform_spherical_random_volume_input():
with NumpyRNGContext(42):
sph = uniform_spherical_random_volume(size=100, max_radius=1)
assert len(sph) == 100
assert sph.distance.unit == u.dimensionless_unscaled
assert sph.distance.max() <= 1.0
sph = uniform_spherical_random_volume(size=100, max_radius=4 * u.pc)
assert len(sph) == 100
assert sph.distance.max() <= 4 * u.pc
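# A minimal consumption sketch (illustrative): these generators return
# spherical representations that SkyCoord accepts directly, e.g.
#
#     from astropy.coordinates import SkyCoord
#     grid = SkyCoord(golden_spiral_grid(size=100))  # near-uniform grid, ICRS by default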
|
f84adee47e51ddebacac5f366db63eb0f57c91efc34617929db033eeb1d06433 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for miscellaneous functionality in the `funcs` module
"""
import numpy as np
import pytest
from numpy import testing as npt
from astropy import units as u
from astropy.time import Time
def test_sun():
"""
Test that `get_sun` works and it behaves roughly as it should (in GCRS)
"""
from astropy.coordinates.funcs import get_sun
northern_summer_solstice = Time("2010-6-21")
northern_winter_solstice = Time("2010-12-21")
equinox_1 = Time("2010-3-21")
equinox_2 = Time("2010-9-21")
gcrs1 = get_sun(equinox_1)
assert np.abs(gcrs1.dec.deg) < 1
gcrs2 = get_sun(
Time([northern_summer_solstice, equinox_2, northern_winter_solstice])
)
assert np.all(np.abs(gcrs2.dec - [23.5, 0, -23.5] * u.deg) < 1 * u.deg)
def test_constellations(recwarn):
from astropy.coordinates import FK5, ICRS, SkyCoord
from astropy.coordinates.funcs import get_constellation
inuma = ICRS(9 * u.hour, 65 * u.deg)
n_prewarn = len(recwarn)
res = get_constellation(inuma)
res_short = get_constellation(inuma, short_name=True)
    assert len(recwarn) == n_prewarn  # neither version should make warnings
assert res == "Ursa Major"
assert res_short == "UMa"
assert isinstance(res, str) or getattr(res, "shape", None) == tuple()
# these are taken from the ReadMe for Roman 1987
ras = [9, 23.5, 5.12, 9.4555, 12.8888, 15.6687, 19, 6.2222]
decs = [65, -20, 9.12, -19.9, 22, -12.1234, -40, -81.1234]
shortnames = ["UMa", "Aqr", "Ori", "Hya", "Com", "Lib", "CrA", "Men"]
testcoos = FK5(ras * u.hour, decs * u.deg, equinox="B1950")
npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames)
# test on a SkyCoord, *and* test Boötes, which is special in that it has a
# non-ASCII character
bootest = SkyCoord(15 * u.hour, 30 * u.deg, frame="icrs")
boores = get_constellation(bootest)
assert boores == "Boötes"
assert isinstance(boores, str) or getattr(boores, "shape", None) == tuple()
@pytest.mark.xfail
def test_constellation_edge_cases():
from astropy.coordinates import FK5
from astropy.coordinates.funcs import get_constellation
# Test edge cases close to borders, using B1875.0 coordinates
# Look for HMS / DMS roundoff-to-decimal issues from Roman (1987) data,
# and misuse of PrecessedGeocentric, as documented in
# https://github.com/astropy/astropy/issues/9855
# Define eight test points.
# The first four cross the boundary at 06h14m30 == 6.2416666666666... hours
    # with Monoceros on the east side of Orion at Dec +3.0.
ras = [6.24100, 6.24160, 6.24166, 6.24171]
# aka ['6h14m27.6s' '6h14m29.76s' '6h14m29.976s' '6h14m30.156s']
decs = [3.0, 3.0, 3.0, 3.0]
# Correct constellations for given RA/Dec coordinates
shortnames = ["Ori", "Ori", "Ori", "Mon"]
# The second four sample northward along RA 22 hours, crossing the boundary
# at 86° 10' == 86.1666... degrees between Cepheus and Ursa Minor
decs += [86.16, 86.1666, 86.16668, 86.1668]
ras += [22.0, 22.0, 22.0, 22.0]
shortnames += ["Cep", "Cep", "Umi", "Umi"]
testcoos = FK5(ras * u.hour, decs * u.deg, equinox="B1875")
npt.assert_equal(
get_constellation(testcoos, short_name=True),
shortnames,
"get_constellation() error: misusing Roman approximations, vs IAU boundaries"
" from Delporte?",
)
# TODO: When that's fixed, add other tests with coords that are in different constellations
# depending on equinox
def test_concatenate():
from astropy.coordinates import FK5, ICRS, SkyCoord
from astropy.coordinates.funcs import concatenate
# Just positions
fk5 = FK5(1 * u.deg, 2 * u.deg)
sc = SkyCoord(3 * u.deg, 4 * u.deg, frame="fk5")
res = concatenate([fk5, sc])
np.testing.assert_allclose(res.ra, [1, 3] * u.deg)
np.testing.assert_allclose(res.dec, [2, 4] * u.deg)
with pytest.raises(TypeError):
concatenate(fk5)
with pytest.raises(TypeError):
concatenate(1 * u.deg)
# positions and velocities
fr = ICRS(
ra=10 * u.deg,
dec=11.0 * u.deg,
pm_ra_cosdec=12 * u.mas / u.yr,
pm_dec=13 * u.mas / u.yr,
)
sc = SkyCoord(
ra=20 * u.deg,
dec=21.0 * u.deg,
pm_ra_cosdec=22 * u.mas / u.yr,
pm_dec=23 * u.mas / u.yr,
)
res = concatenate([fr, sc])
with pytest.raises(ValueError):
concatenate([fr, fk5])
fr2 = ICRS(ra=10 * u.deg, dec=11.0 * u.deg)
with pytest.raises(ValueError):
concatenate([fr, fr2])
def test_concatenate_representations():
from astropy.coordinates import representation as r
from astropy.coordinates.funcs import concatenate_representations
# fmt: off
reps = [r.CartesianRepresentation([1, 2, 3.]*u.kpc),
r.SphericalRepresentation(lon=1*u.deg, lat=2.*u.deg,
distance=10*u.pc),
r.UnitSphericalRepresentation(lon=1*u.deg, lat=2.*u.deg),
r.CartesianRepresentation(np.ones((3, 100)) * u.kpc),
r.CartesianRepresentation(np.ones((3, 16, 8)) * u.kpc)]
reps.append(reps[0].with_differentials(
r.CartesianDifferential([1, 2, 3.] * u.km/u.s)))
reps.append(reps[1].with_differentials(
r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)))
reps.append(reps[2].with_differentials(
r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)))
reps.append(reps[2].with_differentials(
r.UnitSphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr)))
reps.append(reps[2].with_differentials(
{'s': r.RadialDifferential(1*u.km/u.s)}))
reps.append(reps[3].with_differentials(
r.CartesianDifferential(*np.ones((3, 100)) * u.km/u.s)))
reps.append(reps[4].with_differentials(
r.CartesianDifferential(*np.ones((3, 16, 8)) * u.km/u.s)))
# fmt: on
# Test that combining all of the above with itself succeeds
for rep in reps:
if not rep.shape:
expected_shape = (2,)
else:
expected_shape = (2 * rep.shape[0],) + rep.shape[1:]
tmp = concatenate_representations((rep, rep))
assert tmp.shape == expected_shape
if "s" in rep.differentials:
assert tmp.differentials["s"].shape == expected_shape
# Try combining 4, just for something different
for rep in reps:
if not rep.shape:
expected_shape = (4,)
else:
expected_shape = (4 * rep.shape[0],) + rep.shape[1:]
tmp = concatenate_representations((rep, rep, rep, rep))
assert tmp.shape == expected_shape
if "s" in rep.differentials:
assert tmp.differentials["s"].shape == expected_shape
# Test that combining pairs fails
with pytest.raises(TypeError):
concatenate_representations((reps[0], reps[1]))
with pytest.raises(ValueError):
concatenate_representations((reps[0], reps[5]))
# Check that passing in a single object fails
with pytest.raises(TypeError):
concatenate_representations(reps[0])
def test_concatenate_representations_different_units():
from astropy.coordinates import representation as r
from astropy.coordinates.funcs import concatenate_representations
reps = [
r.CartesianRepresentation([1, 2, 3.0] * u.pc),
r.CartesianRepresentation([1, 2, 3.0] * u.kpc),
]
concat = concatenate_representations(reps)
assert concat.shape == (2,)
assert np.all(concat.xyz == ([[1.0, 2.0, 3.0], [1000.0, 2000.0, 3000.0]] * u.pc).T)
|
0e1c456ddcd2238684df9bd55b32b8276911a0c1e2ff56f00e886b15097a7639 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the SkyCoord class. Note that there are also SkyCoord tests in
test_api_ape5.py
"""
import copy
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pytest
from erfa import ErfaWarning
from astropy import units as u
from astropy.coordinates import (
FK4,
FK5,
GCRS,
ICRS,
AltAz,
Angle,
Attribute,
BaseCoordinateFrame,
CartesianRepresentation,
EarthLocation,
Galactic,
Latitude,
RepresentationMapping,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
frame_transform_graph,
)
from astropy.coordinates.representation import (
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
)
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.coordinates.transformations import FunctionTransform
from astropy.io import fits
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils import isiterable
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.wcs import WCS
RA = 1.0 * u.deg
DEC = 2.0 * u.deg
C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5())
J2001 = Time("J2001")
def allclose(a, b, rtol=0.0, atol=None):
if atol is None:
atol = 1.0e-8 * getattr(a, "unit", 1.0)
return quantity_allclose(a, b, rtol, atol)
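# setup_function/teardown_function snapshot and restore the global
# representation registries, so tests that register custom representations
# (or create duplicates) cannot leak state into later tests.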
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def test_is_transformable_to_str_input():
"""Test method ``is_transformable_to`` with string input.
The only difference from the frame method of the same name is that
    strings are allowed. As the frame tests cover ``is_transformable_to``, here
we only test the added string option.
"""
# make example SkyCoord
c = SkyCoord(90 * u.deg, -11 * u.deg)
# iterate through some frames, checking consistency
names = frame_transform_graph.get_names()
for name in names:
frame = frame_transform_graph.lookup_name(name)()
assert c.is_transformable_to(name) == c.is_transformable_to(frame)
def test_transform_to():
for frame in (
FK5(),
FK5(equinox=Time("J1975.0")),
FK4(),
FK4(equinox=Time("J1975.0")),
SkyCoord(RA, DEC, frame="fk4", equinox="J1980"),
):
c_frame = C_ICRS.transform_to(frame)
s_icrs = SkyCoord(RA, DEC, frame="icrs")
s_frame = s_icrs.transform_to(frame)
assert allclose(c_frame.ra, s_frame.ra)
assert allclose(c_frame.dec, s_frame.dec)
assert allclose(c_frame.distance, s_frame.distance)
# set up for parametrized test
rt_sets = []
rt_frames = [ICRS, FK4, FK5, Galactic]
for rt_frame0 in rt_frames:
for rt_frame1 in rt_frames:
for equinox0 in (None, "J1975.0"):
for obstime0 in (None, "J1980.0"):
for equinox1 in (None, "J1975.0"):
for obstime1 in (None, "J1980.0"):
rt_sets.append(
(
rt_frame0,
rt_frame1,
equinox0,
equinox1,
obstime0,
obstime1,
)
)
rt_args = ("frame0", "frame1", "equinox0", "equinox1", "obstime0", "obstime1")
@pytest.mark.parametrize(rt_args, rt_sets)
def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
"""
Test round tripping out and back using transform_to in every combination.
"""
attrs0 = {"equinox": equinox0, "obstime": obstime0}
attrs1 = {"equinox": equinox1, "obstime": obstime1}
# Remove None values
attrs0 = {k: v for k, v in attrs0.items() if v is not None}
attrs1 = {k: v for k, v in attrs1.items() if v is not None}
# Go out and back
sc = SkyCoord(RA, DEC, frame=frame0, **attrs0)
# Keep only frame attributes for frame1
attrs1 = {
attr: val for attr, val in attrs1.items() if attr in frame1.frame_attributes
}
sc2 = sc.transform_to(frame1(**attrs1))
# When coming back only keep frame0 attributes for transform_to
attrs0 = {
attr: val for attr, val in attrs0.items() if attr in frame0.frame_attributes
}
# also, if any are None, fill in with defaults
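    # (for frames like FK4, an unset obstime falls back to the equinox)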
for attrnm in frame0.frame_attributes:
if attrs0.get(attrnm, None) is None:
if attrnm == "obstime" and frame0.get_frame_attr_defaults()[attrnm] is None:
if "equinox" in attrs0:
attrs0[attrnm] = attrs0["equinox"]
else:
attrs0[attrnm] = frame0.get_frame_attr_defaults()[attrnm]
sc_rt = sc2.transform_to(frame0(**attrs0))
if frame0 is Galactic:
assert allclose(sc.l, sc_rt.l)
assert allclose(sc.b, sc_rt.b)
else:
assert allclose(sc.ra, sc_rt.ra)
assert allclose(sc.dec, sc_rt.dec)
if equinox0:
assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
if obstime0:
assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
def test_coord_init_string():
"""
Spherical or Cartesian representation input coordinates.
"""
sc = SkyCoord("1d 2d")
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord("1d", "2d")
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord("1°2′3″", "2°3′4″")
assert allclose(sc.ra, Angle("1°2′3″"))
assert allclose(sc.dec, Angle("2°3′4″"))
sc = SkyCoord("1°2′3″ 2°3′4″")
assert allclose(sc.ra, Angle("1°2′3″"))
assert allclose(sc.dec, Angle("2°3′4″"))
with pytest.raises(ValueError) as err:
SkyCoord("1d 2d 3d")
assert "Cannot parse first argument data" in str(err.value)
sc1 = SkyCoord("8 00 00 +5 00 00.0", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc1, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc11 = SkyCoord("8h00m00s+5d00m00.0s", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc11, SkyCoord)
    assert allclose(sc11.ra, Angle(120 * u.deg))
    assert allclose(sc11.dec, Angle(5 * u.deg))
sc2 = SkyCoord("8 00 -5 00 00.0", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc2, SkyCoord)
assert allclose(sc2.ra, Angle(120 * u.deg))
assert allclose(sc2.dec, Angle(-5 * u.deg))
sc3 = SkyCoord("8 00 -5 00.6", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc3, SkyCoord)
assert allclose(sc3.ra, Angle(120 * u.deg))
assert allclose(sc3.dec, Angle(-5.01 * u.deg))
sc4 = SkyCoord("J080000.00-050036.00", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc4, SkyCoord)
assert allclose(sc4.ra, Angle(120 * u.deg))
assert allclose(sc4.dec, Angle(-5.01 * u.deg))
sc41 = SkyCoord("J080000+050036", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc41, SkyCoord)
assert allclose(sc41.ra, Angle(120 * u.deg))
assert allclose(sc41.dec, Angle(+5.01 * u.deg))
sc5 = SkyCoord("8h00.6m -5d00.6m", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc5, SkyCoord)
assert allclose(sc5.ra, Angle(120.15 * u.deg))
assert allclose(sc5.dec, Angle(-5.01 * u.deg))
sc6 = SkyCoord("8h00.6m -5d00.6m", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc6, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord("8h00.6m-5d00.6m", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc61, SkyCoord)
    assert allclose(sc61.ra, Angle(120.15 * u.deg))
    assert allclose(sc61.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord("8h00.6-5d00.6", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc61, SkyCoord)
    assert allclose(sc61.ra, Angle(120.15 * u.deg))
    assert allclose(sc61.dec, Angle(-5.01 * u.deg))
sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
assert isinstance(sc7, SkyCoord)
assert allclose(sc7.ra, Angle(187.706 * u.deg))
assert allclose(sc7.dec, Angle(12.406 * u.deg))
with pytest.raises(ValueError):
SkyCoord("8 00 -5 00.6", unit=(u.deg, u.deg), frame="galactic")
def test_coord_init_unit():
"""
Test variations of the unit keyword.
"""
for unit in (
"deg",
"deg,deg",
" deg , deg ",
u.deg,
(u.deg, u.deg),
np.array(["deg", "deg"]),
):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(1 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in (
"hourangle",
"hourangle,hourangle",
" hourangle , hourangle ",
u.hourangle,
[u.hourangle, u.hourangle],
):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(30 * u.deg))
for unit in ("hourangle,deg", (u.hourangle, u.deg)):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ("deg,deg,deg,deg", [u.deg, u.deg, u.deg, u.deg], None):
with pytest.raises(ValueError) as err:
SkyCoord(1, 2, unit=unit)
assert "Unit keyword must have one to three unit values" in str(err.value)
for unit in ("m", (u.m, u.deg), ""):
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, unit=unit)
def test_coord_init_list():
"""
Spherical or Cartesian representation input coordinates.
"""
sc = SkyCoord(
[("1d", "2d"), (1 * u.deg, 2 * u.deg), "1d 2d", ("1°", "2°"), "1° 2°"],
unit="deg",
)
assert allclose(sc.ra, Angle("1d"))
assert allclose(sc.dec, Angle("2d"))
with pytest.raises(ValueError) as err:
SkyCoord(["1d 2d 3d"])
assert "Cannot parse first argument data" in str(err.value)
with pytest.raises(ValueError) as err:
SkyCoord([("1d", "2d", "3d")])
assert "Cannot parse first argument data" in str(err.value)
sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
assert allclose(sc.ra, Angle("1d"))
assert allclose(sc.dec, Angle("2d"))
with pytest.raises(
ValueError,
match="One or more elements of input sequence does not have a length",
):
SkyCoord([1 * u.deg, 2 * u.deg]) # this list is taken as RA w/ missing dec
def test_coord_init_array():
"""
Input in the form of a list array or numpy array
"""
for a in (["1 2", "3 4"], [["1", "2"], ["3", "4"]], [[1, 2], [3, 4]]):
sc = SkyCoord(a, unit="deg")
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit="deg")
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
"""
Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, frame="icrs")
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, frame="icrs", ra="1d")
assert "conflicts with keyword argument 'ra'" in str(err.value)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, frame="icrs")
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
def test_frame_init():
"""
Different ways of providing the frame.
"""
sc = SkyCoord(RA, DEC, frame="icrs")
assert sc.frame.name == "icrs"
sc = SkyCoord(RA, DEC, frame=ICRS)
assert sc.frame.name == "icrs"
sc = SkyCoord(sc)
assert sc.frame.name == "icrs"
sc = SkyCoord(C_ICRS)
assert sc.frame.name == "icrs"
    sc = SkyCoord(C_ICRS, frame="icrs")
assert sc.frame.name == "icrs"
with pytest.raises(ValueError) as err:
SkyCoord(C_ICRS, frame="galactic")
assert "Cannot override frame=" in str(err.value)
def test_equal():
obstime = "B1955"
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = SkyCoord([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
sc2 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
def test_equal_different_type():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime="B1955")
# Test equals and not equals operators against different types
assert sc1 != "a string"
assert not (sc1 == "a string")
def test_equal_exceptions():
sc1 = SkyCoord(1 * u.deg, 2 * u.deg, obstime="B1955")
sc2 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(
ValueError,
match=(
"cannot compare: extra frame attribute 'obstime' is not equivalent"
r" \(perhaps compare the frames directly to avoid this exception\)"
),
):
sc1 == sc2
# Note that this exception is the only one raised directly in SkyCoord.
# All others come from lower-level classes and are tested in test_frames.py.
def test_attr_inheritance():
"""
When initializing from an existing coord the representation attrs like
equinox should be inherited to the SkyCoord. If there is a conflict
then raise an exception.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults
assert sc2.equinox != sc.equinox
assert sc2.obstime != sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
@pytest.mark.parametrize("frame", ["fk4", "fk5", "icrs"])
def test_setitem_no_velocity(frame):
"""Test different flavors of item setting for a SkyCoord without a velocity
for different frames. Include a frame attribute that is sometimes an
actual frame attribute and sometimes an extra frame attribute.
"""
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime="B1955", frame=frame)
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg, obstime="B1955", frame=frame)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == Time("B1955")
assert sc1.frame.name == frame
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
def test_setitem_initially_broadcast():
sc = SkyCoord(np.ones((2, 1)) * u.deg, np.ones((1, 3)) * u.deg)
sc[1, 1] = SkyCoord(0 * u.deg, 0 * u.deg)
expected = np.ones((2, 3)) * u.deg
expected[1, 1] = 0.0
assert np.all(sc.ra == expected)
assert np.all(sc.dec == expected)
def test_setitem_velocities():
"""Test different flavors of item setting for a SkyCoord with a velocity."""
sc0 = SkyCoord(
[1, 2] * u.deg,
[3, 4] * u.deg,
radial_velocity=[1, 2] * u.km / u.s,
obstime="B1950",
frame="fk4",
)
sc2 = SkyCoord(
[10, 20] * u.deg,
[30, 40] * u.deg,
radial_velocity=[10, 20] * u.km / u.s,
obstime="B1950",
frame="fk4",
)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == Time("B1950")
assert sc1.frame.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
class SkyCoordSub(SkyCoord):
pass
obstime = "B1955"
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, frame="fk4")
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg, frame="fk4", obstime=obstime)
sc1 = SkyCoordSub(sc0)
with pytest.raises(
TypeError,
match="an only set from object of same class: SkyCoordSub vs. SkyCoord",
):
sc1[0] = sc2[0]
sc1 = SkyCoord(sc0.ra, sc0.dec, frame="fk4", obstime="B2001")
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1.frame[0] = sc2.frame[0]
sc1 = SkyCoord(sc0.ra[0], sc0.dec[0], frame="fk4", obstime=obstime)
with pytest.raises(
TypeError, match="scalar 'FK4' frame object does not support item assignment"
):
sc1[0] = sc2[0]
# Different differentials
sc1 = SkyCoord(
[1, 2] * u.deg,
[3, 4] * u.deg,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
)
sc2 = SkyCoord(
[10, 20] * u.deg, [30, 40] * u.deg, radial_velocity=[10, 20] * u.km / u.s
)
with pytest.raises(
TypeError,
match=(
"can only set from object of same class: "
"UnitSphericalCosLatDifferential vs. RadialDifferential"
),
):
sc1[0] = sc2[0]
def test_insert():
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc1 = SkyCoord(5 * u.deg, 6 * u.deg)
sc3 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc5 = SkyCoord([[10, 2], [30, 4]] * u.deg, [[50, 6], [70, 8]] * u.deg)
# Insert a scalar
sc = sc0.insert(1, sc1)
assert skycoord_equal(sc, SkyCoord([1, 5, 2] * u.deg, [3, 6, 4] * u.deg))
# Insert length=2 array at start of array
sc = sc0.insert(0, sc3)
assert skycoord_equal(sc, SkyCoord([10, 20, 1, 2] * u.deg, [30, 40, 3, 4] * u.deg))
# Insert length=2 array at end of array
sc = sc0.insert(2, sc3)
assert skycoord_equal(sc, SkyCoord([1, 2, 10, 20] * u.deg, [3, 4, 30, 40] * u.deg))
# Multidimensional
sc = sc4.insert(1, sc5)
assert skycoord_equal(
sc,
SkyCoord(
[[1, 2], [10, 2], [30, 4], [3, 4]] * u.deg,
[[5, 6], [50, 6], [70, 8], [7, 8]] * u.deg,
),
)
def test_insert_exceptions():
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc1 = SkyCoord(5 * u.deg, 6 * u.deg)
# sc3 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
with pytest.raises(TypeError, match="cannot insert into scalar"):
sc1.insert(0, sc0)
with pytest.raises(ValueError, match="axis must be 0"):
sc0.insert(0, sc1, axis=1)
with pytest.raises(TypeError, match="obj arg must be an integer"):
sc0.insert(slice(None), sc0)
with pytest.raises(
IndexError, match="index -100 is out of bounds for axis 0 with size 2"
):
sc0.insert(-100, sc0)
# Bad shape
with pytest.raises(
ValueError,
match=r"could not broadcast input array from shape \(2,2\) into shape \(2,?\)",
):
sc0.insert(0, sc4)
def test_attr_conflicts():
"""
    Check conflict resolution between coordinate attributes and init kwargs.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox="J1999", obstime="J2001")
# OK because sc.frame doesn't have obstime
SkyCoord(sc.frame, equinox="J1999", obstime="J2100")
# Not OK if attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox="J1999", obstime="J2002")
assert "Coordinate attribute 'obstime'=" in str(err.value)
# Same game but with fk4 which has equinox and obstime frame attrs
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox="J1999", obstime="J2001")
# Not OK if SkyCoord attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox="J1999", obstime="J2002")
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
# Not OK because sc.frame has different attrs
with pytest.raises(ValueError) as err:
SkyCoord(sc.frame, equinox="J1999", obstime="J2002")
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
def test_frame_attr_getattr():
"""
When accessing frame attributes like equinox, the value should come
from self.frame when that object has the relevant attribute, otherwise
from self.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
assert sc.equinox == "J1999" # Just the raw value (not validated)
assert sc.obstime == "J2001"
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
assert sc.equinox == Time("J1999") # Coming from the self.frame object
assert sc.obstime == Time("J2001")
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999")
assert sc.equinox == Time("J1999")
assert sc.obstime == Time("J1999")
def test_to_string():
"""
Basic testing of converting SkyCoord to strings. This just tests
    for a single input coordinate and a 1-element list. It does not
test the underlying `Angle.to_string` method itself.
"""
coord = "1h2m3s 1d2m3s"
for wrap in (lambda x: x, lambda x: [x]):
sc = SkyCoord(wrap(coord))
assert sc.to_string() == wrap("15.5125 1.03417")
assert sc.to_string("dms") == wrap("15d30m45s 1d02m03s")
assert sc.to_string("hmsdms") == wrap("01h02m03s +01d02m03s")
with_kwargs = sc.to_string("hmsdms", precision=3, pad=True, alwayssign=True)
assert with_kwargs == wrap("+01h02m03.000s +01d02m03.000s")
@pytest.mark.parametrize("cls_other", [SkyCoord, ICRS])
def test_seps(cls_other):
sc1 = SkyCoord(0 * u.deg, 1 * u.deg)
sc2 = cls_other(0 * u.deg, 2 * u.deg)
sep = sc1.separation(sc2)
assert (sep - 1 * u.deg) / u.deg < 1e-10
with pytest.raises(ValueError):
sc1.separation_3d(sc2)
sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc)
sc4 = cls_other(1 * u.deg, 1 * u.deg, distance=2 * u.kpc)
sep3d = sc3.separation_3d(sc4)
assert sep3d == 1 * u.kpc
def test_repr():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame="icrs")
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame="icrs", distance=1 * u.kpc)
assert repr(sc1) == "<SkyCoord (ICRS): (ra, dec) in deg\n (0., 1.)>"
assert (
repr(sc2)
== "<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n (1., 1., 1.)>"
)
sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame="icrs")
assert repr(sc3).startswith("<SkyCoord (ICRS): (ra, dec) in deg\n")
sc_default = SkyCoord(0 * u.deg, 1 * u.deg)
assert repr(sc_default) == "<SkyCoord (ICRS): (ra, dec) in deg\n (0., 1.)>"
def test_repr_altaz():
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame="icrs", distance=1 * u.kpc)
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time("2005-03-21 00:00:00")
sc4 = sc2.transform_to(AltAz(location=loc, obstime=time))
assert repr(sc4).startswith(
"<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, "
"location=(-2309223., -3695529., -4641767.) m, pressure=0.0 hPa, "
"temperature=0.0 deg_C, relative_humidity=0.0, obswl=1.0 micron):"
" (az, alt, distance) in (deg, deg, kpc)\n"
)
def test_ops():
"""
Tests miscellaneous operations like `len`
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg, frame="icrs")
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame="icrs")
sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame="icrs")
assert sc.isscalar
assert not sc_arr.isscalar
assert not sc_empty.isscalar
with pytest.raises(TypeError):
len(sc)
assert len(sc_arr) == 2
assert len(sc_empty) == 0
assert bool(sc)
assert bool(sc_arr)
assert not bool(sc_empty)
assert sc_arr[0].isscalar
assert len(sc_arr[:1]) == 1
# A scalar shouldn't be indexable
with pytest.raises(TypeError):
sc[0:]
# but it should be possible to just get an item
sc_item = sc[()]
assert sc_item.shape == ()
# and to turn it into an array
sc_1d = sc[np.newaxis]
assert sc_1d.shape == (1,)
with pytest.raises(TypeError):
iter(sc)
assert not isiterable(sc)
assert isiterable(sc_arr)
assert isiterable(sc_empty)
it = iter(sc_arr)
assert next(it).dec == sc_arr[0].dec
assert next(it).dec == sc_arr[1].dec
with pytest.raises(StopIteration):
next(it)
def test_none_transform():
"""
Ensure that transforming from a SkyCoord with no frame provided works like
ICRS
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg)
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg)
sc2 = sc.transform_to(ICRS)
assert sc.ra == sc2.ra and sc.dec == sc2.dec
sc5 = sc.transform_to("fk5")
assert sc5.ra == sc2.transform_to("fk5").ra
sc_arr2 = sc_arr.transform_to(ICRS)
sc_arr5 = sc_arr.transform_to("fk5")
npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to("fk5").ra)
def test_position_angle():
c1 = SkyCoord(0 * u.deg, 0 * u.deg)
c2 = SkyCoord(1 * u.deg, 0 * u.deg)
assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0 * u.deg)
c3 = SkyCoord(1 * u.deg, 0.1 * u.deg)
assert c1.position_angle(c3) < 90 * u.deg
c4 = SkyCoord(0 * u.deg, 1 * u.deg)
assert_allclose(c1.position_angle(c4), 0 * u.deg)
carr1 = SkyCoord(0 * u.deg, [0, 1, 2] * u.deg)
carr2 = SkyCoord([-1, -2, -3] * u.deg, [0.1, 1.1, 2.1] * u.deg)
res = carr1.position_angle(carr2)
assert res.shape == (3,)
assert np.all(res < 360 * u.degree)
assert np.all(res > 270 * u.degree)
cicrs = SkyCoord(0 * u.deg, 0 * u.deg, frame="icrs")
cfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5")
# because of the frame transform, it's just a *bit* more than 90 degrees
assert cicrs.position_angle(cfk5) > 90.0 * u.deg
assert cicrs.position_angle(cfk5) < 91.0 * u.deg
def test_position_angle_directly():
"""Regression check for #3800: position_angle should accept floats."""
from astropy.coordinates.angle_utilities import position_angle
result = position_angle(10.0, 20.0, 10.0, 20.0)
assert result.unit is u.radian
assert result.value == 0.0
def test_sep_pa_equivalence():
"""Regression check for bug in #5702.
PA and separation from object 1 to 2 should be consistent with those
from 2 to 1
"""
cfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5")
cfk5B1950 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5", equinox="B1950")
# test with both default and explicit equinox #5722 and #3106
sep_forward = cfk5.separation(cfk5B1950)
sep_backward = cfk5B1950.separation(cfk5)
assert sep_forward != 0 and sep_backward != 0
assert_allclose(sep_forward, sep_backward)
posang_forward = cfk5.position_angle(cfk5B1950)
posang_backward = cfk5B1950.position_angle(cfk5)
assert posang_forward != 0 and posang_backward != 0
assert 179 < (posang_forward - posang_backward).wrap_at(360 * u.deg).degree < 181
dcfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5", distance=1 * u.pc)
dcfk5B1950 = SkyCoord(
1 * u.deg, 0 * u.deg, frame="fk5", equinox="B1950", distance=1.0 * u.pc
)
sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
assert sep3d_forward != 0 and sep3d_backward != 0
assert_allclose(sep3d_forward, sep3d_backward)
def test_directional_offset_by():
# Round-trip tests: where is sc2 from sc1?
# Use those offsets from sc1 and verify you get to sc2.
npoints = 7 # How many points when doing vectors of SkyCoords
for sc1 in [
SkyCoord(0 * u.deg, -90 * u.deg), # South pole
SkyCoord(0 * u.deg, 90 * u.deg), # North pole
SkyCoord(1 * u.deg, 2 * u.deg),
SkyCoord(
np.linspace(0, 359, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="fk4",
),
SkyCoord(
np.linspace(359, 0, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="icrs",
),
SkyCoord(
np.linspace(-3, 3, npoints),
np.linspace(-90, 90, npoints),
unit=(u.rad, u.deg),
frame="barycentricmeanecliptic",
),
]:
for sc2 in [
SkyCoord(5 * u.deg, 10 * u.deg),
SkyCoord(
np.linspace(0, 359, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="galactic",
),
]:
# Find the displacement from sc1 to sc2,
posang = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
# then do the offset from sc1 and verify that you are at sc2
sc2a = sc1.directional_offset_by(position_angle=posang, separation=sep)
assert np.max(np.abs(sc2.separation(sc2a).arcsec)) < 1e-3
# Specific test cases
# Go over the North pole a little way, and
    # over the South pole a long way, to get to the same spot
sc1 = SkyCoord(0 * u.deg, 89 * u.deg)
for posang, sep in [(0 * u.deg, 2 * u.deg), (180 * u.deg, 358 * u.deg)]:
sc2 = sc1.directional_offset_by(posang, sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 89])
# Go twice as far to ensure that dec is actually changing
# and that >360deg is supported
sc2 = sc1.directional_offset_by(posang, 2 * sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 87])
# Verify that a separation of 180 deg in any direction gets to the antipode
# and 360 deg returns to start
sc1 = SkyCoord(10 * u.deg, 47 * u.deg)
for posang in np.linspace(0, 377, npoints):
sc2 = sc1.directional_offset_by(posang, 180 * u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [190, -47])
sc2 = sc1.directional_offset_by(posang, 360 * u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [10, 47])
    # Verify that a 90 degree posang, which means East,
    # corresponds to an increase in RA by ~separation/cos(dec)
    # and a slight convergence toward the equator
sc1 = SkyCoord(10 * u.deg, 60 * u.deg)
sc2 = sc1.directional_offset_by(90 * u.deg, 1.0 * u.deg)
assert 11.9 < sc2.ra.degree < 12.0
assert 59.9 < sc2.dec.degree < 60.0
def test_table_to_coord():
"""
Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
    initializer is the intermediary that translates the table columns into
something coordinates understands.
(Regression test for #1762 )
"""
from astropy.table import Column, Table
t = Table()
t.add_column(Column(data=[1, 2, 3], name="ra", unit=u.deg))
t.add_column(Column(data=[4, 5, 6], name="dec", unit=u.deg))
c = SkyCoord(t["ra"], t["dec"])
assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
"""
Compare two tuples of quantities. This assumes that the values in q1 are of
order(1) and uses atol=1e-13, rtol=0. It also asserts that the units of the
two quantities are the *same*, in order to check that the representation
output has the expected units.
"""
q2s = [getattr(coord, attr) for attr in attrs]
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
base_unit_attr_sets = [
("spherical", u.karcsec, u.karcsec, u.kpc, Latitude, "l", "b", "distance"),
("unitspherical", u.karcsec, u.karcsec, None, Latitude, "l", "b", None),
("physicsspherical", u.karcsec, u.karcsec, u.kpc, Angle, "phi", "theta", "r"),
("cartesian", u.km, u.km, u.km, u.Quantity, "u", "v", "w"),
("cylindrical", u.km, u.karcsec, u.km, Angle, "rho", "phi", "z"),
]
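# Expand each base set over: the representation given by name vs. by class,
# scalar vs. 1-element-list components, and plain values vs. ndarrays.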
units_attr_sets = []
for base_unit_attr_set in base_unit_attr_sets:
repr_name = base_unit_attr_set[0]
for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]):
for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])):
for arrayify in True, False:
if arrayify:
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
units_attr_sets.append(
base_unit_attr_set + (representation, c1, c2, c3)
)
units_attr_args = (
"repr_name",
"unit1",
"unit2",
"unit3",
"cls2",
"attr1",
"attr2",
"attr3",
"representation",
"c1",
"c2",
"c3",
)
@pytest.mark.parametrize(
units_attr_args, [x for x in units_attr_sets if x[0] != "unitspherical"]
)
def test_skycoord_three_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(
c1,
c2,
c3,
unit=(unit1, unit2, unit3),
representation_type=representation,
frame=Galactic,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
sc = SkyCoord(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
1000 * c3 * u.Unit(unit3 / 1000),
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr3: c3}
sc = SkyCoord(
c1,
c2,
unit=(unit1, unit2, unit3),
frame=Galactic,
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr1: c1, attr2: c2, attr3: c3}
sc = SkyCoord(
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
@pytest.mark.parametrize(
units_attr_args,
[x for x in units_attr_sets if x[0] in ("spherical", "unitspherical")],
)
def test_skycoord_spherical_two_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(
c1, c2, unit=(unit1, unit2), frame=Galactic, representation_type=representation
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
sc = SkyCoord(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
kwargs = {attr1: c1, attr2: c2}
sc = SkyCoord(
frame=Galactic,
unit=(unit1, unit2),
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
@pytest.mark.parametrize(
units_attr_args, [x for x in units_attr_sets if x[0] != "unitspherical"]
)
def test_galactic_three_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = Galactic(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
1000 * c3 * u.Unit(unit3 / 1000),
representation_type=representation,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr3: c3 * unit3}
sc = Galactic(c1 * unit1, c2 * unit2, representation_type=representation, **kwargs)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr1: c1 * unit1, attr2: c2 * unit2, attr3: c3 * unit3}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
@pytest.mark.parametrize(
units_attr_args,
[x for x in units_attr_sets if x[0] in ("spherical", "unitspherical")],
)
def test_galactic_spherical_two_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = Galactic(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
representation_type=representation,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
sc = Galactic(c1 * unit1, c2 * unit2, representation_type=representation)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
kwargs = {attr1: c1 * unit1, attr2: c2 * unit2}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
@pytest.mark.parametrize(
("repr_name", "unit1", "unit2", "unit3", "cls2", "attr1", "attr2", "attr3"),
[x for x in base_unit_attr_sets if x[0] != "unitspherical"],
)
def test_skycoord_coordinate_input(
repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3
):
c1, c2, c3 = 1, 2, 3
sc = SkyCoord(
[(c1, c2, c3)],
unit=(unit1, unit2, unit3),
representation_type=repr_name,
frame="galactic",
)
assert_quantities_allclose(
sc, ([c1] * unit1, [c2] * unit2, [c3] * unit3), (attr1, attr2, attr3)
)
c1, c2, c3 = 1 * unit1, 2 * unit2, 3 * unit3
sc = SkyCoord([(c1, c2, c3)], representation_type=repr_name, frame="galactic")
assert_quantities_allclose(
sc, ([1] * unit1, [2] * unit2, [3] * unit3), (attr1, attr2, attr3)
)
def test_skycoord_string_coordinate_input():
sc = SkyCoord("01 02 03 +02 03 04", unit="deg", representation_type="unitspherical")
assert_quantities_allclose(
sc,
(Angle("01:02:03", unit="deg"), Angle("02:03:04", unit="deg")),
("ra", "dec"),
)
sc = SkyCoord(
["01 02 03 +02 03 04"], unit="deg", representation_type="unitspherical"
)
assert_quantities_allclose(
sc,
(Angle(["01:02:03"], unit="deg"), Angle(["02:03:04"], unit="deg")),
("ra", "dec"),
)
def test_units():
sc = SkyCoord(1, 2, 3, unit="m", representation_type="cartesian") # All get meters
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
# All get u.m
sc = SkyCoord(1, 2 * u.km, 3, unit="m", representation_type="cartesian")
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit=u.m, representation_type="cartesian") # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit="m, km, pc", representation_type="cartesian")
assert_quantities_allclose(sc, (1 * u.m, 2 * u.km, 3 * u.pc), ("x", "y", "z"))
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, 3, unit=(u.m, u.m), representation_type="cartesian")
assert "should have matching physical types" in str(err.value)
    sc = SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation_type="cartesian")
assert_quantities_allclose(sc, (1 * u.m, 2 * u.km, 3 * u.pc), ("x", "y", "z"))
@pytest.mark.xfail
def test_units_known_fail():
# should fail but doesn't => corner case oddity
with pytest.raises(u.UnitsError):
SkyCoord(1, 2, 3, unit=u.deg, representation_type="spherical")
def test_nodata_failure():
with pytest.raises(ValueError):
SkyCoord()
@pytest.mark.parametrize(("mode", "origin"), [("wcs", 0), ("all", 0), ("all", 1)])
def test_wcs_methods(mode, origin):
from astropy.utils.data import get_pkg_data_contents
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord
header = get_pkg_data_contents(
"../../wcs/tests/data/maps/1904-66_TAN.hdr", encoding="binary"
)
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89.0 * u.deg, frame="icrs")
xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to("icrs")
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to(
"icrs"
)
assert_allclose(scnew.ra.degree, ref.ra.degree)
assert_allclose(scnew.dec.degree, ref.dec.degree)
# Also make sure the right type comes out
class SkyCoord2(SkyCoord):
pass
scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin)
assert scnew.__class__ is SkyCoord
assert scnew2.__class__ is SkyCoord2
def test_frame_attr_transform_inherit():
"""
Test that frame attributes get inherited as expected during transform.
Driven by #3106.
"""
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5)
c2 = c.transform_to(FK4)
assert c2.equinox.value == "B1950.000"
assert c2.obstime.value == "B1950.000"
c2 = c.transform_to(FK4(equinox="J1975", obstime="J1980"))
assert c2.equinox.value == "J1975.000"
assert c2.obstime.value == "J1980.000"
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4)
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J2000.000"
assert c2.obstime is None
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime="J1980")
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J2000.000"
assert c2.obstime.value == "J1980.000"
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox="J1975", obstime="J1980")
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J1975.000"
assert c2.obstime.value == "J1980.000"
c2 = c.transform_to(FK5(equinox="J1990"))
assert c2.equinox.value == "J1990.000"
assert c2.obstime.value == "J1980.000"
# The work-around for #5722
c = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5")
c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", equinox="B1950.000")
c2 = c1.transform_to(c)
assert not c2.is_equivalent_frame(c) # counterintuitive, but documented
assert c2.equinox.value == "B1950.000"
c3 = c1.transform_to(c, merge_attributes=False)
assert c3.equinox.value == "J2000.000"
assert c3.is_equivalent_frame(c)
def test_deepcopy():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
c2 = copy.copy(c1)
c3 = copy.deepcopy(c1)
c4 = SkyCoord(
[1, 2] * u.m,
[2, 3] * u.m,
[3, 4] * u.m,
representation_type="cartesian",
frame="fk5",
obstime="J1999.9",
equinox="J1988.8",
)
c5 = copy.deepcopy(c4)
assert np.all(c5.x == c4.x) # and y and z
assert c5.frame.name == c4.frame.name
assert c5.obstime == c4.obstime
assert c5.equinox == c4.equinox
assert c5.representation_type == c4.representation_type
def test_no_copy():
c1 = SkyCoord(np.arange(10.0) * u.hourangle, np.arange(20.0, 30.0) * u.deg)
c2 = SkyCoord(c1, copy=False)
# Note: c1.ra and c2.ra will *not* share memory, as these are recalculated
# to be in "preferred" units. See discussion in #4883.
assert np.may_share_memory(c1.data.lon, c2.data.lon)
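    # Illustrative check (a sketch of the note above): the derived .ra is
    # recomputed into the preferred units, so it does not share memory even
    # though the underlying representation data does.
    assert not np.may_share_memory(c1.ra, c2.ra)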
c3 = SkyCoord(c1, copy=True)
assert not np.may_share_memory(c1.data.lon, c3.data.lon)
def test_immutable():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(AttributeError):
c1.ra = 3.0
c1.foo = 42
assert c1.foo == 42
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_search_around():
"""
Test the search_around_* methods
Here we don't actually test the values are right, just that the methods of
SkyCoord work. The accuracy tests are in ``test_matching.py``
"""
from astropy.utils import NumpyRNGContext
with NumpyRNGContext(987654321):
sc1 = SkyCoord(
np.random.rand(20) * 360.0 * u.degree,
(np.random.rand(20) * 180.0 - 90.0) * u.degree,
)
sc2 = SkyCoord(
np.random.rand(100) * 360.0 * u.degree,
(np.random.rand(100) * 180.0 - 90.0) * u.degree,
)
sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20) * u.kpc)
sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100) * u.kpc)
idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10 * u.deg)
idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250 * u.pc)
def test_init_with_frame_instance_keyword():
# Frame instance
c1 = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox="J2010"))
assert c1.equinox == Time("J2010")
# Frame instance with data (data gets ignored)
c2 = SkyCoord(
3 * u.deg, 4 * u.deg, frame=FK5(1.0 * u.deg, 2 * u.deg, equinox="J2010")
)
assert c2.equinox == Time("J2010")
assert allclose(c2.ra.degree, 3)
assert allclose(c2.dec.degree, 4)
# SkyCoord instance
c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1)
assert c3.equinox == Time("J2010")
# Check duplicate arguments
with pytest.raises(ValueError) as err:
c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox="J2010"), equinox="J2001")
assert "Cannot specify frame attribute 'equinox'" in str(err.value)
def test_guess_from_table():
from astropy.table import Column, Table
from astropy.utils import NumpyRNGContext
tab = Table()
with NumpyRNGContext(987654321):
tab.add_column(Column(data=np.random.rand(10), unit="deg", name="RA[J2000]"))
tab.add_column(Column(data=np.random.rand(10), unit="deg", name="DEC[J2000]"))
sc = SkyCoord.guess_from_table(tab)
npt.assert_array_equal(sc.ra.deg, tab["RA[J2000]"])
npt.assert_array_equal(sc.dec.deg, tab["DEC[J2000]"])
# try without units in the table
tab["RA[J2000]"].unit = None
tab["DEC[J2000]"].unit = None
# should fail if not given explicitly
with pytest.raises(u.UnitsError):
sc2 = SkyCoord.guess_from_table(tab)
# but should work if provided
sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
npt.assert_array_equal(sc2.ra.deg, tab["RA[J2000]"])
npt.assert_array_equal(sc2.dec.deg, tab["DEC[J2000]"])
# should fail if two options are available - ambiguity bad!
tab.add_column(Column(data=np.random.rand(10), name="RA_J1900"))
with pytest.raises(ValueError) as excinfo:
SkyCoord.guess_from_table(tab, unit=u.deg)
assert "J1900" in excinfo.value.args[0] and "J2000" in excinfo.value.args[0]
tab.remove_column("RA_J1900")
tab["RA[J2000]"].unit = u.deg
tab["DEC[J2000]"].unit = u.deg
# but should succeed if the ambiguity can be broken b/c one of the matches
# is the name of a different component
tab.add_column(Column(data=np.random.rand(10) * u.mas / u.yr, name="pm_ra_cosdec"))
tab.add_column(Column(data=np.random.rand(10) * u.mas / u.yr, name="pm_dec"))
sc3 = SkyCoord.guess_from_table(tab)
assert u.allclose(sc3.ra, tab["RA[J2000]"])
assert u.allclose(sc3.dec, tab["DEC[J2000]"])
assert u.allclose(sc3.pm_ra_cosdec, tab["pm_ra_cosdec"])
assert u.allclose(sc3.pm_dec, tab["pm_dec"])
# should fail if stuff doesn't have proper units
tab["RA[J2000]"].unit = None
tab["DEC[J2000]"].unit = None
with pytest.raises(u.UnitTypeError, match="no unit was given."):
SkyCoord.guess_from_table(tab)
tab.remove_column("pm_ra_cosdec")
tab.remove_column("pm_dec")
# should also fail if user specifies something already in the table, but
# should succeed even if the user has to give one of the components
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tab, ra=tab["RA[J2000]"], unit=u.deg)
oldra = tab["RA[J2000]"]
tab.remove_column("RA[J2000]")
sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
npt.assert_array_equal(sc3.ra.deg, oldra)
npt.assert_array_equal(sc3.dec.deg, tab["DEC[J2000]"])
# check a few non-ICRS/spherical systems
x, y, z = np.arange(3).reshape(3, 1) * u.pc
l, b = np.arange(2).reshape(2, 1) * u.deg
tabcart = Table([x, y, z], names=("x", "y", "z"))
tabgal = Table([b, l], names=("b", "l"))
sc_cart = SkyCoord.guess_from_table(tabcart, representation_type="cartesian")
npt.assert_array_equal(sc_cart.x, x)
npt.assert_array_equal(sc_cart.y, y)
npt.assert_array_equal(sc_cart.z, z)
sc_gal = SkyCoord.guess_from_table(tabgal, frame="galactic")
npt.assert_array_equal(sc_gal.l, l)
npt.assert_array_equal(sc_gal.b, b)
# also try some column names that *end* with the attribute name
tabgal["b"].name = "gal_b"
tabgal["l"].name = "gal_l"
SkyCoord.guess_from_table(tabgal, frame="galactic")
tabgal["gal_b"].name = "blob"
tabgal["gal_l"].name = "central"
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tabgal, frame="galactic")
def test_skycoord_list_creation():
"""
Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
(regression for #2702)
"""
sc = SkyCoord(ra=[1, 2, 3] * u.deg, dec=[4, 5, 6] * u.deg)
sc0 = sc[0]
sc2 = sc[2]
scnew = SkyCoord([sc0, sc2])
assert np.all(scnew.ra == [1, 3] * u.deg)
assert np.all(scnew.dec == [4, 6] * u.deg)
# also check ranges
sc01 = sc[:2]
scnew2 = SkyCoord([sc01, sc2])
assert np.all(scnew2.ra == sc.ra)
assert np.all(scnew2.dec == sc.dec)
# now try with a mix of skycoord, frame, and repr objects
frobj = ICRS(2 * u.deg, 5 * u.deg)
reprobj = UnitSphericalRepresentation(3 * u.deg, 6 * u.deg)
scnew3 = SkyCoord([sc0, frobj, reprobj])
assert np.all(scnew3.ra == sc.ra)
assert np.all(scnew3.dec == sc.dec)
# should *fail* if different frame attributes or types are passed in
scfk5_j2000 = SkyCoord(1 * u.deg, 4 * u.deg, frame="fk5")
with pytest.raises(ValueError):
SkyCoord([sc0, scfk5_j2000])
scfk5_j2010 = SkyCoord(1 * u.deg, 4 * u.deg, frame="fk5", equinox="J2010")
with pytest.raises(ValueError):
SkyCoord([scfk5_j2000, scfk5_j2010])
# but they should inherit if they're all consistent
scfk5_2_j2010 = SkyCoord(2 * u.deg, 5 * u.deg, frame="fk5", equinox="J2010")
scfk5_3_j2010 = SkyCoord(3 * u.deg, 6 * u.deg, frame="fk5", equinox="J2010")
scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
assert np.all(scnew4.ra == sc.ra)
assert np.all(scnew4.dec == sc.dec)
assert scnew4.equinox == Time("J2010")
def test_nd_skycoord_to_string():
c = SkyCoord(np.ones((2, 2)), 1, unit=("deg", "deg"))
ts = c.to_string()
assert np.all(ts.shape == c.shape)
assert np.all(ts == "1 1")
def test_equiv_skycoord():
sci1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
sci2 = SkyCoord(1 * u.deg, 3 * u.deg, frame="icrs")
assert sci1.is_equivalent_frame(sci1)
assert sci1.is_equivalent_frame(sci2)
assert sci1.is_equivalent_frame(ICRS())
assert not sci1.is_equivalent_frame(FK5())
with pytest.raises(TypeError):
sci1.is_equivalent_frame(10)
scf1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5")
scf2 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", equinox="J2005")
    # obstime is *not* an FK5 attribute, but we still want scf1 and scf3 to
    # come out different because they're part of SkyCoord
scf3 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", obstime="J2005")
assert scf1.is_equivalent_frame(scf1)
assert not scf1.is_equivalent_frame(sci1)
assert scf1.is_equivalent_frame(FK5())
assert not scf1.is_equivalent_frame(scf2)
assert scf2.is_equivalent_frame(FK5(equinox="J2005"))
assert not scf3.is_equivalent_frame(scf1)
assert not scf3.is_equivalent_frame(FK5(equinox="J2005"))
def test_equiv_skycoord_with_extra_attrs():
"""Regression test for #10658."""
# GCRS has a CartesianRepresentationAttribute called obsgeoloc
gcrs = GCRS(
1 * u.deg, 2 * u.deg, obsgeoloc=CartesianRepresentation([1, 2, 3], unit=u.m)
)
# Create a SkyCoord where obsgeoloc tags along as an extra attribute
sc1 = SkyCoord(gcrs).transform_to(ICRS)
# Now create a SkyCoord with an equivalent frame but without the extra attribute
sc2 = SkyCoord(sc1.frame)
# The SkyCoords are therefore not equivalent, but check both directions
assert not sc1.is_equivalent_frame(sc2)
# This way around raised a TypeError which is fixed by #10658
assert not sc2.is_equivalent_frame(sc1)
def test_constellations():
# the actual test for accuracy is in test_funcs - this is just meant to make
# sure we get sensible answers
sc = SkyCoord(135 * u.deg, 65 * u.deg)
assert sc.get_constellation() == "Ursa Major"
assert sc.get_constellation(short_name=True) == "UMa"
scs = SkyCoord([135] * 2 * u.deg, [65] * 2 * u.deg)
npt.assert_equal(scs.get_constellation(), ["Ursa Major"] * 2)
npt.assert_equal(scs.get_constellation(short_name=True), ["UMa"] * 2)
@pytest.mark.remote_data
def test_constellations_with_nameresolve():
assert SkyCoord.from_name("And I").get_constellation(short_name=True) == "And"
# you'd think "And ..." should be in Andromeda. But you'd be wrong.
assert SkyCoord.from_name("And VI").get_constellation() == "Pegasus"
# maybe it's because And VI isn't really a galaxy?
assert SkyCoord.from_name("And XXII").get_constellation() == "Pisces"
assert SkyCoord.from_name("And XXX").get_constellation() == "Cassiopeia"
# ok maybe not
# ok, but at least some of the others do make sense...
assert (
SkyCoord.from_name("Coma Cluster").get_constellation(short_name=True) == "Com"
)
assert SkyCoord.from_name("Orion Nebula").get_constellation() == "Orion"
assert SkyCoord.from_name("Triangulum Galaxy").get_constellation() == "Triangulum"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
sc.representation_type = "cartesian"
assert sc[0].representation_type is CartesianRepresentation
def test_spherical_offsets_to_api():
i00 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame="icrs")
fk5 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame="fk5")
with pytest.raises(ValueError):
# different frames should fail
i00.spherical_offsets_to(fk5)
i1deg = ICRS(1 * u.deg, 1 * u.deg)
dra, ddec = i00.spherical_offsets_to(i1deg)
assert_allclose(dra, 1 * u.deg)
assert_allclose(ddec, 1 * u.deg)
# make sure an abbreviated array-based version of the above also works
i00s = SkyCoord([0] * 4 * u.arcmin, [0] * 4 * u.arcmin, frame="icrs")
i01s = SkyCoord([0] * 4 * u.arcmin, np.arange(4) * u.arcmin, frame="icrs")
dra, ddec = i00s.spherical_offsets_to(i01s)
assert_allclose(dra, 0 * u.arcmin)
assert_allclose(ddec, np.arange(4) * u.arcmin)
@pytest.mark.parametrize("frame", ["icrs", "galactic"])
@pytest.mark.parametrize(
"comparison_data",
[
(0 * u.arcmin, 1 * u.arcmin),
(1 * u.arcmin, 0 * u.arcmin),
(1 * u.arcmin, 1 * u.arcmin),
],
)
def test_spherical_offsets_roundtrip(frame, comparison_data):
i00 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame=frame)
comparison = SkyCoord(*comparison_data, frame=frame)
dlon, dlat = i00.spherical_offsets_to(comparison)
assert_allclose(dlon, comparison.data.lon)
assert_allclose(dlat, comparison.data.lat)
i00_back = comparison.spherical_offsets_by(-dlon, -dlat)
# This reaches machine precision when only one component is changed, but for
# the third parametrized case (both lon and lat change), the transformation
# will have finite accuracy:
assert_allclose(i00_back.data.lon, i00.data.lon, atol=1e-10 * u.rad)
assert_allclose(i00_back.data.lat, i00.data.lat, atol=1e-10 * u.rad)
# Test roundtripping the other direction:
init_c = SkyCoord(40.0 * u.deg, 40.0 * u.deg, frame=frame)
new_c = init_c.spherical_offsets_by(3.534 * u.deg, 2.2134 * u.deg)
dlon, dlat = new_c.spherical_offsets_to(init_c)
back_c = new_c.spherical_offsets_by(dlon, dlat)
assert init_c.separation(back_c) < 1e-10 * u.deg
def test_frame_attr_changes():
"""
This tests the case where a frame is added with a new frame attribute after
a SkyCoord has been created. This is necessary because SkyCoords get the
attributes set at creation time, but the set of attributes can change as
frames are added or removed from the transform graph. This makes sure that
everything continues to work consistently.
"""
sc_before = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
assert "fakeattr" not in dir(sc_before)
class FakeFrame(BaseCoordinateFrame):
fakeattr = Attribute()
# doesn't matter what this does as long as it just puts the frame in the
# transform graph
transset = (ICRS, FakeFrame, lambda c, f: c)
frame_transform_graph.add_transform(*transset)
try:
assert "fakeattr" in dir(sc_before)
assert sc_before.fakeattr is None
sc_after1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
assert "fakeattr" in dir(sc_after1)
assert sc_after1.fakeattr is None
sc_after2 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs", fakeattr=1)
assert sc_after2.fakeattr == 1
finally:
frame_transform_graph.remove_transform(*transset)
assert "fakeattr" not in dir(sc_before)
assert "fakeattr" not in dir(sc_after1)
assert "fakeattr" not in dir(sc_after2)
def test_cache_clear_sc():
from astropy.coordinates import SkyCoord
i = SkyCoord(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
assert len(i.cache["representation"]) == 2
i.cache.clear()
assert len(i.cache["representation"]) == 0
def test_set_attribute_exceptions():
"""Ensure no attrbute for any frame can be set directly.
Though it is fine if the current frame does not have it."""
sc = SkyCoord(1.0 * u.deg, 2.0 * u.deg, frame="fk5")
assert hasattr(sc.frame, "equinox")
with pytest.raises(AttributeError):
sc.equinox = "B1950"
assert sc.relative_humidity is None
sc.relative_humidity = 0.5
assert sc.relative_humidity == 0.5
assert not hasattr(sc.frame, "relative_humidity")
def test_extra_attributes():
"""Ensure any extra attributes are dealt with correctly.
Regression test against #5743.
"""
obstime_string = ["2017-01-01T00:00", "2017-01-01T00:10"]
obstime = Time(obstime_string)
sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
assert not hasattr(sc.frame, "obstime")
assert type(sc.obstime) is Time
assert sc.obstime.shape == (2,)
assert np.all(sc.obstime == obstime)
# ensure equivalency still works for more than one obstime.
assert sc.is_equivalent_frame(sc)
sc_1 = sc[1]
assert sc_1.obstime == obstime[1]
# Transforming to FK4 should use sc.obstime.
sc_fk4 = sc.transform_to("fk4")
assert np.all(sc_fk4.frame.obstime == obstime)
    # And transforming back should not lose it.
sc2 = sc_fk4.transform_to("icrs")
assert not hasattr(sc2.frame, "obstime")
assert np.all(sc2.obstime == obstime)
# Ensure obstime get taken from the SkyCoord if passed in directly.
# (regression test for #5749).
sc3 = SkyCoord([0.0, 1.0], [2.0, 3.0], unit="deg", frame=sc)
assert np.all(sc3.obstime == obstime)
# Finally, check that we can delete such attributes.
del sc3.obstime
assert sc3.obstime is None
def test_apply_space_motion():
# use this 12 year period because it's a multiple of 4 to avoid the quirks
# of leap years while having 2 leap seconds in it
t1 = Time("2000-01-01T00:00")
t2 = Time("2012-01-01T00:00")
# Check a very simple case first:
frame = ICRS(
ra=10.0 * u.deg,
dec=0 * u.deg,
distance=10.0 * u.pc,
pm_ra_cosdec=0.1 * u.deg / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
# Cases that should work (just testing input for now):
c1 = SkyCoord(frame, obstime=t1, pressure=101 * u.kPa)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied1 = c1.apply_space_motion(new_obstime=t2)
applied2 = c1.apply_space_motion(dt=12 * u.year)
assert isinstance(applied1.frame, c1.frame.__class__)
assert isinstance(applied2.frame, c1.frame.__class__)
assert_allclose(applied1.ra, applied2.ra)
assert_allclose(applied1.pm_ra_cosdec, applied2.pm_ra_cosdec)
assert_allclose(applied1.dec, applied2.dec)
assert_allclose(applied1.distance, applied2.distance)
# ensure any frame attributes that were there before get passed through
assert applied1.pressure == c1.pressure
    # there were 2 leap seconds between 2000 and 2012, so the difference in
    # the two forms of time evolution should be ~2 sec
adt = np.abs(applied2.obstime - applied1.obstime)
assert 1.9 * u.second < adt.to(u.second) < 2.1 * u.second
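    # (for reference: those leap seconds were inserted at the ends of 2005 and
    # 2008, both inside the 2000-2012 span used here)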
c2 = SkyCoord(frame)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied3 = c2.apply_space_motion(dt=6 * u.year)
assert isinstance(applied3.frame, c1.frame.__class__)
assert applied3.obstime is None
    # this should *not* be exactly 0.6 deg due to space motion on a sphere,
    # but it should be fairly close
assert 0.5 * u.deg < applied3.ra - c1.ra < 0.7 * u.deg
# the two cases should only match somewhat due to it being space motion, but
# they should be at least this close
assert quantity_allclose(
applied1.ra - c1.ra, (applied3.ra - c1.ra) * 2, atol=1e-3 * u.deg
)
# but *not* this close
assert not quantity_allclose(
applied1.ra - c1.ra, (applied3.ra - c1.ra) * 2, atol=1e-4 * u.deg
)
with pytest.raises(ValueError):
c2.apply_space_motion(new_obstime=t2)
def test_custom_frame_skycoord():
# also regression check for the case from #7069
class BlahBleeBlopFrame(BaseCoordinateFrame):
default_representation = SphericalRepresentation
# without a differential, SkyCoord creation fails
# default_differential = SphericalDifferential
_frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "lon", "recommended"),
RepresentationMapping("lat", "lat", "recommended"),
RepresentationMapping("distance", "radius", "recommended"),
]
}
SkyCoord(lat=1 * u.deg, lon=2 * u.deg, frame=BlahBleeBlopFrame)
def test_user_friendly_pm_error():
"""
This checks that a more user-friendly error message is raised for the user
if they pass, e.g., pm_ra instead of pm_ra_cosdec
"""
with pytest.raises(ValueError) as e:
SkyCoord(
ra=150 * u.deg,
dec=-11 * u.deg,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
)
assert "pm_ra_cosdec" in str(e.value)
with pytest.raises(ValueError) as e:
SkyCoord(
l=150 * u.deg,
b=-11 * u.deg,
pm_l=100 * u.mas / u.yr,
pm_b=10 * u.mas / u.yr,
frame="galactic",
)
assert "pm_l_cosb" in str(e.value)
# The special error should not turn on here:
with pytest.raises(ValueError) as e:
SkyCoord(
x=1 * u.pc,
y=2 * u.pc,
z=3 * u.pc,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
representation_type="cartesian",
)
assert "pm_ra_cosdec" not in str(e.value)
def test_contained_by():
"""
    Test SkyCoord.contained_by(wcs, image)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
test_wcs = WCS(fits.Header.fromstring(header.strip(), "\n"))
assert SkyCoord(254, 2, unit="deg").contained_by(test_wcs)
assert not SkyCoord(240, 2, unit="deg").contained_by(test_wcs)
img = np.zeros((2136, 2078))
assert SkyCoord(250, 2, unit="deg").contained_by(test_wcs, img)
assert not SkyCoord(240, 2, unit="deg").contained_by(test_wcs, img)
ra = np.array([254.2, 254.1])
dec = np.array([2, 12.1])
coords = SkyCoord(ra, dec, unit="deg")
assert np.all(test_wcs.footprint_contains(coords) == np.array([True, False]))
def test_none_differential_type():
"""
This is a regression test for #8021
"""
from astropy.coordinates import BaseCoordinateFrame
class MockHeliographicStonyhurst(BaseCoordinateFrame):
default_representation = SphericalRepresentation
frame_specific_representation_info = {
SphericalRepresentation: [
RepresentationMapping(
reprname="lon", framename="lon", defaultunit=u.deg
),
RepresentationMapping(
reprname="lat", framename="lat", defaultunit=u.deg
),
RepresentationMapping(
reprname="distance", framename="radius", defaultunit=None
),
]
}
fr = MockHeliographicStonyhurst(lon=1 * u.deg, lat=2 * u.deg, radius=10 * u.au)
SkyCoord(0 * u.deg, fr.lat, fr.radius, frame=fr) # this was the failure
def test_multiple_aliases():
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ["alias_1", "alias_2"]
default_representation = SphericalRepresentation
# Register a transform, which adds the aliases to the transform graph
tfun = lambda c, f: f.__class__(lon=c.lon, lat=c.lat)
ftrans = FunctionTransform(
tfun,
MultipleAliasesFrame,
MultipleAliasesFrame,
register_graph=frame_transform_graph,
)
coord = SkyCoord(lon=1 * u.deg, lat=2 * u.deg, frame=MultipleAliasesFrame)
# Test attribute-style access returns self (not a copy)
assert coord.alias_1 is coord
assert coord.alias_2 is coord
# Test for aliases in __dir__()
assert "alias_1" in coord.__dir__()
assert "alias_2" in coord.__dir__()
# Test transform_to() calls
assert isinstance(coord.transform_to("alias_1").frame, MultipleAliasesFrame)
assert isinstance(coord.transform_to("alias_2").frame, MultipleAliasesFrame)
ftrans.unregister(frame_transform_graph)
@pytest.mark.parametrize(
"kwargs, error_message",
[
(
{"ra": 1, "dec": 1, "distance": 1 * u.pc, "unit": "deg"},
r"Unit 'deg' \(angle\) could not be applied to 'distance'. ",
),
(
{
"rho": 1 * u.m,
"phi": 1,
"z": 1 * u.m,
"unit": "deg",
"representation_type": "cylindrical",
},
r"Unit 'deg' \(angle\) could not be applied to 'rho'. ",
),
],
)
def test_passing_inconsistent_coordinates_and_units_raises_helpful_error(
kwargs, error_message
):
# https://github.com/astropy/astropy/issues/10725
with pytest.raises(ValueError, match=error_message):
SkyCoord(**kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_match_to_catalog_3d_and_sky():
# Test for issue #5857. See PR #11449
cfk5_default = SkyCoord(
[1, 2, 3, 4] * u.degree,
[0, 0, 0, 0] * u.degree,
distance=[1, 1, 1.5, 1] * u.kpc,
frame="fk5",
)
cfk5_J1950 = cfk5_default.transform_to(FK5(equinox="J1950"))
idx, angle, quantity = cfk5_J1950.match_to_catalog_3d(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(quantity, 0 * u.kpc, atol=1e-14 * u.kpc, rtol=0)
idx, angle, distance = cfk5_J1950.match_to_catalog_sky(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(distance, 0 * u.kpc, atol=1e-14 * u.kpc, rtol=0)
def test_subclass_property_exception_error():
"""Regression test for gh-8340.
Non-existing attribute access inside a property should give attribute
error for the attribute, not for the property.
"""
class custom_coord(SkyCoord):
@property
def prop(self):
return self.random_attr
c = custom_coord("00h42m30s", "+41d12m00s", frame="icrs")
with pytest.raises(AttributeError, match="random_attr"):
# Before this matched "prop" rather than "random_attr"
c.prop
|
8a83c5d9d9c6615c7109b13ead211424713fb2d442026a9f881aecc4e65b71ad | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This includes tests for the Distance class and related calculations
"""
import numpy as np
import pytest
from numpy import testing as npt
from astropy import units as u
from astropy.coordinates import CartesianRepresentation, Distance, Latitude, Longitude
from astropy.coordinates.builtin_frames import ICRS, Galactic
from astropy.units import allclose as quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyWarning
MULTIPLE_INPUTS_ERROR_MSG = "^more than one of `.*` were given to Distance constructor$"
def test_distances():
"""
Tests functionality for Coordinate class distances and cartesian
transformations.
"""
"""
Distances can also be specified, and allow for a full 3D definition of a
coordinate.
"""
# try all the different ways to initialize a Distance
distance = Distance(12, u.parsec)
Distance(40, unit=u.au)
Distance(value=5, unit=u.kpc)
# need to provide a unit
with pytest.raises(u.UnitsError):
Distance(12)
with pytest.raises(ValueError, match="none of `value`, `z`, `distmod`,"):
Distance(unit=u.km)
# standard units are pre-defined
npt.assert_allclose(distance.lyr, 39.138765325702551)
npt.assert_allclose(distance.km, 370281309776063.0)
# Coordinate objects can be assigned a distance object, giving them a full
# 3D position
c = Galactic(
l=158.558650 * u.degree,
b=-43.350066 * u.degree,
distance=Distance(12, u.parsec),
)
assert quantity_allclose(c.distance, 12 * u.pc)
# or initialize distances via redshifts - this is actually tested in the
# function below that checks for scipy. This is kept here as an example
# c.distance = Distance(z=0.2) # uses current cosmology
# with whatever your preferred cosmology may be
# c.distance = Distance(z=0.2, cosmology=WMAP5)
# Coordinate objects can be initialized with a distance using special
# syntax
c1 = Galactic(l=158.558650 * u.deg, b=-43.350066 * u.deg, distance=12 * u.kpc)
# Coordinate objects can be instantiated with cartesian coordinates
# Internally they will immediately be converted to two angles + a distance
cart = CartesianRepresentation(x=2 * u.pc, y=4 * u.pc, z=8 * u.pc)
c2 = Galactic(cart)
sep12 = c1.separation_3d(c2)
    # returns a *3d* distance between the c1 and c2 coordinates
    # note that this is *not* an on-sky (angular) separation
assert isinstance(sep12, Distance)
    npt.assert_allclose(sep12.pc, 12005.784163916317, atol=10)
"""
All spherical coordinate systems with distances can be converted to
cartesian coordinates.
"""
cartrep2 = c2.cartesian
assert isinstance(cartrep2.x, u.Quantity)
npt.assert_allclose(cartrep2.x.value, 2)
npt.assert_allclose(cartrep2.y.value, 4)
npt.assert_allclose(cartrep2.z.value, 8)
# with no distance, the unit sphere is assumed when converting to cartesian
c3 = Galactic(l=158.558650 * u.degree, b=-43.350066 * u.degree, distance=None)
unitcart = c3.cartesian
npt.assert_allclose(
((unitcart.x**2 + unitcart.y**2 + unitcart.z**2) ** 0.5).value, 1.0
)
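    # (A sketch of the conversion used here: with longitude l, latitude b and
    # distance d, x = d cos(b) cos(l), y = d cos(b) sin(l), z = d sin(b);
    # with d implicitly 1, the norm above is exactly 1.)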
# TODO: choose between these when CartesianRepresentation gets a definite
# decision on whether or not it gets __add__
#
# CartesianRepresentation objects can be added and subtracted, which are
# vector/elementwise they can also be given as arguments to a coordinate
# system
# csum = ICRS(c1.cartesian + c2.cartesian)
csumrep = CartesianRepresentation(c1.cartesian.xyz + c2.cartesian.xyz)
csum = ICRS(csumrep)
npt.assert_allclose(csumrep.x.value, -8.12016610185)
npt.assert_allclose(csumrep.y.value, 3.19380597435)
npt.assert_allclose(csumrep.z.value, -8.2294483707)
npt.assert_allclose(csum.ra.degree, 158.529401774)
npt.assert_allclose(csum.dec.degree, -43.3235825777)
npt.assert_allclose(csum.distance.kpc, 11.9942200501)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_distances_scipy():
"""
The distance-related tests that require scipy due to the cosmology
module needing scipy integration routines
"""
from astropy.cosmology import WMAP5
# try different ways to initialize a Distance
d4 = Distance(z=0.23) # uses default cosmology - as of writing, WMAP7
npt.assert_allclose(d4.z, 0.23, rtol=1e-8)
d5 = Distance(z=0.23, cosmology=WMAP5)
npt.assert_allclose(d5.compute_z(WMAP5), 0.23, rtol=1e-8)
d6 = Distance(z=0.23, cosmology=WMAP5, unit=u.km)
npt.assert_allclose(d6.value, 3.5417046898762366e22)
with pytest.raises(ValueError, match="a `cosmology` was given but `z`"):
Distance(parallax=1 * u.mas, cosmology=WMAP5)
# Regression test for #12531
with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG):
Distance(z=0.23, parallax=1 * u.mas)
# vectors! regression test for #11949
d4 = Distance(z=[0.23, 0.45]) # as of writing, Planck18
npt.assert_allclose(d4.z, [0.23, 0.45], rtol=1e-8)
def test_distance_change():
ra = Longitude("4:08:15.162342", unit=u.hour)
dec = Latitude("-41:08:15.162342", unit=u.degree)
c1 = ICRS(ra, dec, Distance(1, unit=u.kpc))
oldx = c1.cartesian.x.value
    assert abs(oldx - 0.35284083171901953) < 1e-10
# first make sure distances are immutable
with pytest.raises(AttributeError):
c1.distance = Distance(2, unit=u.kpc)
    # now x should increase when the distance increases
c2 = ICRS(ra, dec, Distance(2, unit=u.kpc))
assert c2.cartesian.x.value == oldx * 2
def test_distance_is_quantity():
"""
test that distance behaves like a proper quantity
"""
Distance(2 * u.kpc)
d = Distance([2, 3.1], u.kpc)
assert d.shape == (2,)
a = d.view(np.ndarray)
q = d.view(u.Quantity)
a[0] = 1.2
q.value[1] = 5.4
assert d[0].value == 1.2
assert d[1].value == 5.4
q = u.Quantity(d, copy=True)
q.value[1] = 0
assert q.value[1] == 0
assert d.value[1] != 0
# regression test against #2261
d = Distance([2 * u.kpc, 250.0 * u.pc])
assert d.unit is u.kpc
assert np.all(d.value == np.array([2.0, 0.25]))
def test_distmod():
d = Distance(10, u.pc)
assert d.distmod.value == 0
d = Distance(distmod=20)
assert d.distmod.value == 20
assert d.kpc == 100
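    # Worked check (a sketch): the distance modulus is mu = 5 log10(d / 10 pc),
    # so mu = 20 inverts to d = 10 pc * 10**(20 / 5) = 1e5 pc = 100 kpc,
    # matching the assertion above.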
d = Distance(distmod=-1.0, unit=u.au)
npt.assert_allclose(d.value, 1301442.9440836983)
with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG):
d = Distance(value=d, distmod=20)
with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG):
d = Distance(z=0.23, distmod=20)
# check the Mpc/kpc/pc behavior
assert Distance(distmod=1).unit == u.pc
assert Distance(distmod=11).unit == u.kpc
assert Distance(distmod=26).unit == u.Mpc
assert Distance(distmod=-21).unit == u.AU
# if an array, uses the mean of the log of the distances
assert Distance(distmod=[1, 11, 26]).unit == u.kpc
def test_parallax():
d = Distance(parallax=1 * u.arcsecond)
assert d.pc == 1.0
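    # (The relation being tested: d = 1 AU / tan(p) ~ (1 / p[arcsec]) pc,
    # so a parallax of exactly 1 arcsec gives exactly 1 pc.)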
with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG):
d = Distance(15 * u.pc, parallax=20 * u.milliarcsecond)
with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG):
d = Distance(parallax=20 * u.milliarcsecond, distmod=20)
# array
plx = [1, 10, 100.0] * u.mas
d = Distance(parallax=plx)
assert quantity_allclose(d.pc, [1000.0, 100.0, 10.0])
assert quantity_allclose(plx, d.parallax)
error_message = (
r"^some parallaxes are negative, which are not interpretable as distances\. "
)
with pytest.raises(ValueError, match=error_message):
Distance(parallax=-1 * u.mas)
with pytest.raises(ValueError, match=error_message):
Distance(parallax=[10, 1, -1] * u.mas)
warning_message = "^negative parallaxes are converted to NaN distances even when"
with pytest.warns(AstropyWarning, match=warning_message):
Distance(parallax=-1 * u.mas, allow_negative=True)
with pytest.warns(AstropyWarning, match=warning_message):
Distance(parallax=[10, 1, -1] * u.mas, allow_negative=True)
# Regression test for #12569; `unit` was ignored if `parallax` was given.
d = Distance(parallax=1 * u.mas, unit=u.kpc)
assert d.value == 1.0
assert d.unit is u.kpc
def test_distance_in_coordinates():
"""
test that distances can be created from quantities and that cartesian
representations come out right
"""
ra = Longitude("4:08:15.162342", unit=u.hour)
dec = Latitude("-41:08:15.162342", unit=u.degree)
coo = ICRS(ra, dec, distance=2 * u.kpc)
cart = coo.cartesian
assert isinstance(cart.xyz, u.Quantity)
def test_negative_distance():
"""Test optional kwarg allow_negative"""
error_message = (
r"^distance must be >= 0\. Use the argument `allow_negative=True` to allow "
r"negative values\.$"
)
with pytest.raises(ValueError, match=error_message):
Distance([-2, 3.1], u.kpc)
with pytest.raises(ValueError, match=error_message):
Distance([-2, -3.1], u.kpc)
with pytest.raises(ValueError, match=error_message):
Distance(-2, u.kpc)
d = Distance(-2, u.kpc, allow_negative=True)
assert d.value == -2
def test_distance_comparison():
"""Ensure comparisons of distances work (#2206, #2250)"""
a = Distance(15 * u.kpc)
b = Distance(15 * u.kpc)
assert a == b
c = Distance(1.0 * u.Mpc)
assert a < c
def test_distance_to_quantity_when_not_units_of_length():
"""Any operation that leaves units other than those of length
should turn a distance into a quantity (#2206, #2250)"""
d = Distance(15 * u.kpc)
twice = 2.0 * d
assert isinstance(twice, Distance)
area = 4.0 * np.pi * d**2
assert area.unit.is_equivalent(u.m**2)
assert not isinstance(area, Distance)
assert type(area) is u.Quantity
def test_distance_nan():
# Check that giving NaNs to Distance doesn't emit a warning
Distance([0, np.nan, 1] * u.m)
|
f3f1a93d5b6a0091e8daab92dd8a0c5db5b077c2ee0f6602b67dd956eb93caa1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization of angles not already covered by the API tests"""
import pickle
import numpy as np
import pytest
from astropy import constants
from astropy import units as u
from astropy.coordinates.angles import Latitude, Longitude
from astropy.coordinates.earth import ELLIPSOIDS, EarthLocation
from astropy.coordinates.name_resolve import NameResolveError
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
def allclose_m14(a, b, rtol=1.0e-14, atol=None):
if atol is None:
atol = 1.0e-14 * getattr(a, "unit", 1)
return quantity_allclose(a, b, rtol, atol)
def allclose_m8(a, b, rtol=1.0e-8, atol=None):
if atol is None:
atol = 1.0e-8 * getattr(a, "unit", 1)
return quantity_allclose(a, b, rtol, atol)
def isclose_m14(val, ref):
return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)])
def isclose_m8(val, ref):
return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)])
def vvd(val, valok, dval, func, test, status):
"""Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
def test_gc2gd():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geocentric(x, y, z, u.m)
e, p, h = location.to_geodetic("WGS84")
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic("GRS80")
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic("WGS72")
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_gd2gc():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geodetic(e, p, h, ellipsoid="WGS84")
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid="GRS80")
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid="WGS72")
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
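# A minimal round-trip sketch (not part of the ERFA reference checks above,
# and the helper name is ours): geodetic -> geocentric -> geodetic should
# recover the inputs to within numerical precision for a registered ellipsoid.
def _geodetic_roundtrip_sketch(ellipsoid="WGS84"):
    loc = EarthLocation.from_geodetic(
        30 * u.deg, -10 * u.deg, 150 * u.m, ellipsoid=ellipsoid
    )
    lon, lat, height = loc.to_geodetic(ellipsoid)
    assert quantity_allclose(lon, 30 * u.deg)
    assert quantity_allclose(lat, -10 * u.deg)
    assert quantity_allclose(height, 150 * u.m, atol=1e-6 * u.m)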
class TestInput:
def setup_method(self):
self.lon = Longitude(
[0.0, 45.0, 90.0, 135.0, 180.0, -180, -90, -45],
u.deg,
wrap_angle=180 * u.deg,
)
self.lat = Latitude([+0.0, 30.0, 60.0, +90.0, -90.0, -60.0, -30.0, 0.0], u.deg)
self.h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11.0, -0.1], u.m)
self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h)
self.x, self.y, self.z = self.location.to_geocentric()
def test_default_ellipsoid(self):
assert self.location.ellipsoid == EarthLocation._ellipsoid
def test_geo_attributes(self):
assert all(
np.all(_1 == _2)
for _1, _2 in zip(self.location.geodetic, self.location.to_geodetic())
)
assert all(
np.all(_1 == _2)
for _1, _2 in zip(self.location.geocentric, self.location.to_geocentric())
)
def test_attribute_classes(self):
"""Test that attribute classes are correct (and not EarthLocation)"""
assert type(self.location.x) is u.Quantity
assert type(self.location.y) is u.Quantity
assert type(self.location.z) is u.Quantity
assert type(self.location.lon) is Longitude
assert type(self.location.lat) is Latitude
assert type(self.location.height) is u.Quantity
def test_input(self):
"""Check input is parsed correctly"""
# units of length should be assumed geocentric
geocentric = EarthLocation(self.x, self.y, self.z)
assert np.all(geocentric == self.location)
geocentric2 = EarthLocation(
self.x.value, self.y.value, self.z.value, self.x.unit
)
assert np.all(geocentric2 == self.location)
geodetic = EarthLocation(self.lon, self.lat, self.h)
assert np.all(geodetic == self.location)
geodetic2 = EarthLocation(
self.lon.to_value(u.degree),
self.lat.to_value(u.degree),
self.h.to_value(u.m),
)
assert np.all(geodetic2 == self.location)
geodetic3 = EarthLocation(self.lon, self.lat)
assert allclose_m14(geodetic3.lon.value, self.location.lon.value)
assert allclose_m14(geodetic3.lat.value, self.location.lat.value)
assert not np.any(
isclose_m14(geodetic3.height.value, self.location.height.value)
)
geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1])
assert allclose_m14(geodetic4.lon.value, self.location.lon.value)
assert allclose_m14(geodetic4.lat.value, self.location.lat.value)
assert allclose_m14(geodetic4.height[-1].value, self.location.height[-1].value)
assert not np.any(
isclose_m14(geodetic4.height[:-1].value, self.location.height[:-1].value)
)
# check length unit preservation
geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc)
assert geocentric5.unit is u.pc
assert geocentric5.x.unit is u.pc
assert geocentric5.height.unit is u.pc
assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value)
geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc))
assert geodetic5.unit is u.pc
assert geodetic5.x.unit is u.pc
assert geodetic5.height.unit is u.pc
assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value)
def test_invalid_input(self):
"""Check invalid input raises exception"""
# incomprehensible by either raises TypeError
with pytest.raises(TypeError):
EarthLocation(self.lon, self.y, self.z)
# wrong units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.lon, self.lat, self.lat)
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.h, self.lon, self.lat)
# floats without a unit
with pytest.raises(TypeError):
EarthLocation.from_geocentric(self.x.value, self.y.value, self.z.value)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geocentric(self.x, self.y, self.z[:5])
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geodetic(self.x, self.y, self.z)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5])
def test_slicing(self):
# test on WGS72 location, so we can check the ellipsoid is passed on
locwgs72 = EarthLocation.from_geodetic(
self.lon, self.lat, self.h, ellipsoid="WGS72"
)
loc_slice1 = locwgs72[4]
assert isinstance(loc_slice1, EarthLocation)
assert loc_slice1.unit is locwgs72.unit
assert loc_slice1.ellipsoid == locwgs72.ellipsoid == "WGS72"
assert not loc_slice1.shape
with pytest.raises(TypeError):
loc_slice1[0]
with pytest.raises(IndexError):
len(loc_slice1)
loc_slice2 = locwgs72[4:6]
assert isinstance(loc_slice2, EarthLocation)
assert len(loc_slice2) == 2
assert loc_slice2.unit is locwgs72.unit
assert loc_slice2.ellipsoid == locwgs72.ellipsoid
assert loc_slice2.shape == (2,)
loc_x = locwgs72["x"]
assert type(loc_x) is u.Quantity
assert loc_x.shape == locwgs72.shape
assert loc_x.unit is locwgs72.unit
def test_invalid_ellipsoid(self):
# unknown ellipsoid
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h, ellipsoid="foo")
with pytest.raises(TypeError):
EarthLocation(self.lon, self.lat, self.h, ellipsoid="foo")
with pytest.raises(ValueError):
self.location.ellipsoid = "foo"
with pytest.raises(ValueError):
self.location.to_geodetic("foo")
@pytest.mark.parametrize("ellipsoid", ELLIPSOIDS)
def test_ellipsoid(self, ellipsoid):
"""Test that different ellipsoids are understood, and differ"""
# check that heights differ for different ellipsoids
# need different tolerance, since heights are relative to ~6000 km
lon, lat, h = self.location.to_geodetic(ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m8(h.value, self.h.value)
else:
# Some heights are very similar for some; some lon, lat identical.
assert not np.all(isclose_m8(h.value, self.h.value))
# given lon, lat, height, check that x,y,z differ
location = EarthLocation.from_geodetic(
self.lon, self.lat, self.h, ellipsoid=ellipsoid
)
if ellipsoid == self.location.ellipsoid:
assert allclose_m14(location.z.value, self.z.value)
else:
assert not np.all(isclose_m14(location.z.value, self.z.value))
def test_to_value(self):
loc = self.location
loc_ndarray = loc.view(np.ndarray)
assert np.all(loc.value == loc_ndarray)
loc2 = self.location.to(u.km)
loc2_ndarray = np.empty_like(loc_ndarray)
for coo in "x", "y", "z":
loc2_ndarray[coo] = loc_ndarray[coo] / 1000.0
assert np.all(loc2.value == loc2_ndarray)
loc2_value = self.location.to_value(u.km)
assert np.all(loc2_value == loc2_ndarray)
def test_pickling():
"""Regression test against #4304."""
el = EarthLocation(0.0 * u.m, 6000 * u.km, 6000 * u.km)
s = pickle.dumps(el)
el2 = pickle.loads(s)
assert el == el2
def test_repr_latex():
"""
Regression test for issue #4542
"""
somelocation = EarthLocation(lon="149:3:57.9", lat="-31:16:37.3")
somelocation._repr_latex_()
somelocation2 = EarthLocation(lon=[1.0, 2.0] * u.deg, lat=[-1.0, 9.0] * u.deg)
somelocation2._repr_latex_()
@pytest.mark.remote_data
# TODO: this parametrize should include a second option with a valid Google API
# key. For example, we should make an API key for Astropy, and add it to GitHub Actions
# as an environment variable (for security).
@pytest.mark.parametrize("google_api_key", [None])
def test_of_address(google_api_key):
NYC_lon = -74.0 * u.deg
NYC_lat = 40.7 * u.deg
# ~10 km tolerance to address difference between OpenStreetMap and Google
# for "New York, NY". This doesn't matter in practice because this test is
# only used to verify that the query succeeded, not that the returned
# position is precise.
NYC_tol = 0.1 * u.deg
# just a location
try:
loc = EarthLocation.of_address("New York, NY")
except NameResolveError as e:
# API limit might surface even here in CI.
if "unknown failure with" not in str(e):
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert np.allclose(loc.height.value, 0.0)
    # Put this one here as a buffer to get around the Google Maps API
    # per-second rate limit.
# no match: This always raises NameResolveError
with pytest.raises(NameResolveError):
EarthLocation.of_address("lkjasdflkja")
if google_api_key is not None:
# a location and height
try:
loc = EarthLocation.of_address("New York, NY", get_height=True)
except NameResolveError as e:
# Buffer above sometimes insufficient to get around API limit but
# we also do not want to drag things out with time.sleep(0.195),
# where 0.195 was empirically determined on some physical machine.
            pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert quantity_allclose(loc.height, 10.438 * u.meter, atol=1.0 * u.cm)
def test_geodetic_tuple():
lat = 2 * u.deg
lon = 10 * u.deg
height = 100 * u.m
el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height)
res1 = el.to_geodetic()
res2 = el.geodetic
assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat)
assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon)
assert res1.height == res2.height and quantity_allclose(res1.height, height)
def test_gravitational_redshift():
someloc = EarthLocation(lon=-87.7 * u.deg, lat=37 * u.deg)
sometime = Time("2017-8-21 18:26:40")
zg0 = someloc.gravitational_redshift(sometime)
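    # (A sketch of what is computed: the redshift is returned as an equivalent
    # velocity, roughly sum_i GM_i / (r_i * c) over the requested solar-system
    # bodies, so the variations probed below come from the changing
    # Earth-body distances r_i.)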
# should be of order ~few mm/s change per week
zg_week = someloc.gravitational_redshift(sometime + 7 * u.day)
assert 1.0 * u.mm / u.s < abs(zg_week - zg0) < 1 * u.cm / u.s
# ~cm/s over a half-year
zg_halfyear = someloc.gravitational_redshift(sometime + 0.5 * u.yr)
assert 1 * u.cm / u.s < abs(zg_halfyear - zg0) < 1 * u.dm / u.s
    # but when back at the same time of year, the shift should be tenths of
    # mm/s even over decades
zg_year = someloc.gravitational_redshift(sometime - 20 * u.year)
assert 0.1 * u.mm / u.s < abs(zg_year - zg0) < 1 * u.mm / u.s
# Check mass adjustments.
# If Jupiter and the moon are ignored, effect should be off by ~ .5 mm/s
masses = {
"sun": constants.G * constants.M_sun,
"jupiter": 0 * constants.G * u.kg,
"moon": 0 * constants.G * u.kg,
}
zg_moonjup = someloc.gravitational_redshift(sometime, masses=masses)
assert 0.1 * u.mm / u.s < abs(zg_moonjup - zg0) < 1 * u.mm / u.s
# Check that simply not including the bodies gives the same result.
assert zg_moonjup == someloc.gravitational_redshift(sometime, bodies=("sun",))
# And that earth can be given, even not as last argument
assert zg_moonjup == someloc.gravitational_redshift(
sometime, bodies=("earth", "sun")
)
# If the earth is also ignored, effect should be off by ~ 20 cm/s
# This also tests the conversion of kg to gravitational units.
masses["earth"] = 0 * u.kg
zg_moonjupearth = someloc.gravitational_redshift(sometime, masses=masses)
assert 1 * u.dm / u.s < abs(zg_moonjupearth - zg0) < 1 * u.m / u.s
# If all masses are zero, redshift should be 0 as well.
masses["sun"] = 0 * u.kg
assert someloc.gravitational_redshift(sometime, masses=masses) == 0
with pytest.raises(KeyError):
someloc.gravitational_redshift(sometime, bodies=("saturn",))
with pytest.raises(u.UnitsError):
masses = {
"sun": constants.G * constants.M_sun,
"jupiter": constants.G * constants.M_jup,
"moon": 1 * u.km, # wrong units!
"earth": constants.G * constants.M_earth,
}
someloc.gravitational_redshift(sometime, masses=masses)
def test_read_only_input():
lon = np.array([80.0, 440.0]) * u.deg
lat = np.array([45.0]) * u.deg
lon.flags.writeable = lat.flags.writeable = False
loc = EarthLocation.from_geodetic(lon=lon, lat=lat)
assert quantity_allclose(loc[1].x, loc[0].x)
def test_info():
EarthLocation._get_site_registry(force_builtin=True)
greenwich = EarthLocation.of_site("greenwich")
assert str(greenwich.info).startswith("name = Royal Observatory Greenwich")
|
8b49de5c61f29fd578415bdc7832158fcae878710832a4c1a3dac68c9aa974e2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import constants
from astropy import units as u
from astropy.coordinates import (
CartesianDifferential,
CartesianRepresentation,
DynamicMatrixTransform,
FunctionTransformWithFiniteDifference,
SphericalDifferential,
SphericalRepresentation,
TimeAttribute,
get_sun,
)
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.builtin_frames import FK5, GCRS, ICRS, LSR, AltAz, Galactic
from astropy.coordinates.sites import get_builtin_sites
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
J2000 = Time("J2000")
@pytest.mark.parametrize(
"dt, symmetric",
[
(1 * u.second, True),
(1 * u.year, True),
(1 * u.second, False),
(1 * u.year, False),
],
)
def test_faux_lsr(dt, symmetric):
class LSR2(LSR):
obstime = TimeAttribute(default=J2000)
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference,
ICRS,
LSR2,
finite_difference_dt=dt,
symmetric_finite_difference=symmetric,
)
def icrs_to_lsr(icrs_coo, lsr_frame):
dt = lsr_frame.obstime - J2000
offset = lsr_frame.v_bary * dt.to(u.second)
return lsr_frame.realize_frame(icrs_coo.data.without_differentials() + offset)
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference,
LSR2,
ICRS,
finite_difference_dt=dt,
symmetric_finite_difference=symmetric,
)
def lsr_to_icrs(lsr_coo, icrs_frame):
dt = lsr_coo.obstime - J2000
offset = lsr_coo.v_bary * dt.to(u.second)
return icrs_frame.realize_frame(lsr_coo.data - offset)
ic = ICRS(
ra=12.3 * u.deg,
dec=45.6 * u.deg,
distance=7.8 * u.au,
pm_ra_cosdec=0 * u.marcsec / u.yr,
pm_dec=0 * u.marcsec / u.yr,
radial_velocity=0 * u.km / u.s,
)
lsrc = ic.transform_to(LSR2())
assert quantity_allclose(ic.cartesian.xyz, lsrc.cartesian.xyz)
idiff = ic.cartesian.differentials["s"]
ldiff = lsrc.cartesian.differentials["s"]
change = (ldiff.d_xyz - idiff.d_xyz).to(u.km / u.s)
totchange = np.sum(change**2) ** 0.5
assert quantity_allclose(totchange, np.sum(lsrc.v_bary.d_xyz**2) ** 0.5)
ic2 = ICRS(
ra=120.3 * u.deg,
dec=45.6 * u.deg,
distance=7.8 * u.au,
pm_ra_cosdec=0 * u.marcsec / u.yr,
pm_dec=10 * u.marcsec / u.yr,
radial_velocity=1000 * u.km / u.s,
)
lsrc2 = ic2.transform_to(LSR2())
ic2_roundtrip = lsrc2.transform_to(ICRS())
tot = np.sum(lsrc2.cartesian.differentials["s"].d_xyz ** 2) ** 0.5
assert np.abs(tot.to("km/s") - 1000 * u.km / u.s) < 20 * u.km / u.s
assert quantity_allclose(ic2.cartesian.xyz, ic2_roundtrip.cartesian.xyz)
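# The registrations above use FunctionTransformWithFiniteDifference, which
# numerically differentiates the position transform to propagate velocities.
# A minimal sketch of the symmetric (central) difference it is based on,
# assuming a position function x(t) and a small step dt (helper name is ours):
def _central_difference_sketch(x_of_t, t, dt):
    """Illustrative only: v ~= (x(t + dt/2) - x(t - dt/2)) / dt."""
    return (x_of_t(t + dt / 2) - x_of_t(t - dt / 2)) / dt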
def test_faux_fk5_galactic():
from astropy.coordinates.builtin_frames.galactic_transforms import (
_gal_to_fk5,
fk5_to_gal,
)
class Galactic2(Galactic):
pass
dt = 1000 * u.s
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference,
FK5,
Galactic2,
finite_difference_dt=dt,
symmetric_finite_difference=True,
finite_difference_frameattr_name=None,
)
def fk5_to_gal2(fk5_coo, gal_frame):
trans = DynamicMatrixTransform(fk5_to_gal, FK5, Galactic2)
return trans(fk5_coo, gal_frame)
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference,
Galactic2,
    FK5,
finite_difference_dt=dt,
symmetric_finite_difference=True,
finite_difference_frameattr_name=None,
)
def gal2_to_fk5(gal_coo, fk5_frame):
trans = DynamicMatrixTransform(_gal_to_fk5, Galactic2, FK5)
return trans(gal_coo, fk5_frame)
c1 = FK5(
ra=150 * u.deg,
dec=-17 * u.deg,
radial_velocity=83 * u.km / u.s,
pm_ra_cosdec=-41 * u.mas / u.yr,
pm_dec=16 * u.mas / u.yr,
distance=150 * u.pc,
)
c2 = c1.transform_to(Galactic2())
c3 = c1.transform_to(Galactic())
# compare the matrix and finite-difference calculations
assert quantity_allclose(c2.pm_l_cosb, c3.pm_l_cosb, rtol=1e-4)
assert quantity_allclose(c2.pm_b, c3.pm_b, rtol=1e-4)
def test_gcrs_diffs():
time = Time("2017-01-01")
gf = GCRS(obstime=time)
sung = get_sun(time) # should have very little vhelio
# qtr-year off sun location should be the direction of ~ maximal vhelio
qtrsung = get_sun(time - 0.25 * u.year)
    # now we use those essentially as directions where the velocities should
    # be either maximal or minimal - along or perpendicular to Earth's orbit
msungr = CartesianRepresentation(-sung.cartesian.xyz).represent_as(
SphericalRepresentation
)
suni = ICRS(
ra=msungr.lon,
dec=msungr.lat,
distance=100 * u.au,
pm_ra_cosdec=0 * u.marcsec / u.yr,
pm_dec=0 * u.marcsec / u.yr,
radial_velocity=0 * u.km / u.s,
)
qtrsuni = ICRS(
ra=qtrsung.ra,
dec=qtrsung.dec,
distance=100 * u.au,
pm_ra_cosdec=0 * u.marcsec / u.yr,
pm_dec=0 * u.marcsec / u.yr,
radial_velocity=0 * u.km / u.s,
)
# Now we transform those parallel- and perpendicular-to Earth's orbit
# directions to GCRS, which should shift the velocity to either include
# the Earth's velocity vector, or not (for parallel and perpendicular,
# respectively).
sung = suni.transform_to(gf)
qtrsung = qtrsuni.transform_to(gf)
    # the radial velocity should be high along the in-ecliptic axis
    # perpendicular to the sun direction, and low along the sun direction
assert np.abs(qtrsung.radial_velocity) > 30 * u.km / u.s
assert np.abs(qtrsung.radial_velocity) < 40 * u.km / u.s
assert np.abs(sung.radial_velocity) < 1 * u.km / u.s
suni2 = sung.transform_to(ICRS())
assert np.all(np.abs(suni2.data.differentials["s"].d_xyz) < 3e-5 * u.km / u.s)
qtrisun2 = qtrsung.transform_to(ICRS())
assert np.all(np.abs(qtrisun2.data.differentials["s"].d_xyz) < 3e-5 * u.km / u.s)
def test_altaz_diffs():
time = Time("J2015") + np.linspace(-1, 1, 1000) * u.day
loc = get_builtin_sites()["greenwich"]
aa = AltAz(obstime=time, location=loc)
icoo = ICRS(
np.zeros(time.shape) * u.deg,
10 * u.deg,
100 * u.au,
pm_ra_cosdec=np.zeros(time.shape) * u.marcsec / u.yr,
pm_dec=0 * u.marcsec / u.yr,
radial_velocity=0 * u.km / u.s,
)
acoo = icoo.transform_to(aa)
# Make sure the change in radial velocity over ~2 days isn't too much
# more than the rotation speed of the Earth - some excess is expected
# because the orbit also shifts the RV, but it should be pretty small
# over this short a time.
assert (
np.ptp(acoo.radial_velocity) / 2 < (2 * np.pi * constants.R_earth / u.day) * 1.2
) # MAGIC NUMBER
cdiff = acoo.data.differentials["s"].represent_as(CartesianDifferential, acoo.data)
# The "total" velocity should be > c, because the *tangential* velocity
    # isn't a true velocity, but rather an induced velocity due to the Earth's
# rotation at a distance of 100 AU
assert np.all(np.sum(cdiff.d_xyz**2, axis=0) ** 0.5 > constants.c)
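# (A quick order-of-magnitude check of the claim above: the sky revolves once
# per sidereal day, so at 100 AU the induced tangential speed is roughly
# 2 * pi * 1.5e10 km / 86400 s ~ 1e6 km/s, well above c ~ 3e5 km/s.)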
_xfail = pytest.mark.xfail
@pytest.mark.parametrize(
"distance",
[
1000 * u.au,
10 * u.pc,
pytest.param(10 * u.kpc, marks=_xfail),
pytest.param(100 * u.kpc, marks=_xfail),
],
)
# TODO: make these not fail when the
# finite-difference numerical stability
# is improved
def test_numerical_limits(distance):
"""
Tests the numerical stability of the default settings for the finite
    difference transformation calculation. This is *known* to fail at
    distances >~1 kpc, but this may be improved in future versions.
"""
time = Time("J2017") + np.linspace(-0.5, 0.5, 100) * u.year
icoo = ICRS(
ra=0 * u.deg,
dec=10 * u.deg,
distance=distance,
pm_ra_cosdec=0 * u.marcsec / u.yr,
pm_dec=0 * u.marcsec / u.yr,
radial_velocity=0 * u.km / u.s,
)
gcoo = icoo.transform_to(GCRS(obstime=time))
rv = gcoo.radial_velocity.to("km/s")
    # if it's a lot bigger than this - ~the maximal velocity shift along
# the direction above with a small allowance for noise - finite-difference
# rounding errors have ruined the calculation
assert np.ptp(rv) < 65 * u.km / u.s
def diff_info_plot(frame, time):
"""
Useful for plotting a frame with multiple times. *Not* used in the testing
suite per se, but extremely useful for interactive plotting of results from
tests in this module.
"""
from matplotlib import pyplot as plt
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 12))
ax1.plot_date(
time.plot_date, frame.data.differentials["s"].d_xyz.to(u.km / u.s).T, fmt="-"
)
ax1.legend(["x", "y", "z"])
ax2.plot_date(
time.plot_date,
np.sum(frame.data.differentials["s"].d_xyz.to(u.km / u.s) ** 2, axis=0) ** 0.5,
fmt="-",
)
ax2.set_title("total")
sd = frame.data.differentials["s"].represent_as(SphericalDifferential, frame.data)
ax3.plot_date(time.plot_date, sd.d_distance.to(u.km / u.s), fmt="-")
ax3.set_title("radial")
ax4.plot_date(time.plot_date, sd.d_lat.to(u.marcsec / u.yr), fmt="-", label="lat")
ax4.plot_date(time.plot_date, sd.d_lon.to(u.marcsec / u.yr), fmt="-", label="lon")
return fig
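# Minimal usage sketch for diff_info_plot (illustrative; the frame must carry
# velocity differentials and be evaluated at an array of times):
#
#   times = Time("J2015") + np.linspace(-1, 1, 1000) * u.day
#   aa = AltAz(obstime=times, location=get_builtin_sites()["greenwich"])
#   frame = ICRS(
#       np.zeros(times.shape) * u.deg, 10 * u.deg, 100 * u.au,
#       pm_ra_cosdec=np.zeros(times.shape) * u.marcsec / u.yr,
#       pm_dec=0 * u.marcsec / u.yr,
#       radial_velocity=0 * u.km / u.s,
#   ).transform_to(aa)
#   fig = diff_info_plot(frame, times)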
|
f181052344efd6cd99c9f5314b52d2aae1bd0479c85aa6f9be5ea851e915dee1 | """
This series of functions are used to generate the reference CSV files
used by the accuracy tests. Running this as a command-line script will
generate them all.
"""
import os
import numpy as np
from astropy.table import Column, Table
def ref_fk4_no_e_fk4(fnout="fk4_no_e_fk4.csv"):
"""
Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK4
conversion, with arbitrary equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the FK4
# coordinates for the transformation to FK5, or the FK5 coordinates for the
# transformation to FK4.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
ra_fk4ne, dec_fk4ne = [], []
ra_fk4, dec_fk4 = [], []
for i in range(N):
# Set up frames for AST
frame_fk4ne = Ast.SkyFrame(f"System=FK4-NO-E,Epoch={obstime[i]},Equinox=B1950")
frame_fk4 = Ast.SkyFrame(f"System=FK4,Epoch={obstime[i]},Equinox=B1950")
# FK4 to FK4 (no E-terms)
frameset = frame_fk4.convert(frame_fk4ne)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4ne.append(coords[0, 0])
dec_fk4ne.append(coords[1, 0])
# FK4 (no E-terms) to FK4
frameset = frame_fk4ne.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk4ne", data=ra_fk4ne))
t.add_column(Column(name="dec_fk4ne", data=dec_fk4ne))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
    with open(os.path.join("data", fnout), "w") as f:
        f.write(
            f"# This file was generated with the {os.path.basename(__file__)} script, and"
            " the reference values were computed using AST\n"
        )
        t.write(f, format="ascii", delimiter=",")
def ref_fk4_no_e_fk5(fnout="fk4_no_e_fk5.csv"):
"""
Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK5
conversion, with arbitrary equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the FK4
# coordinates for the transformation to FK5, or the FK5 coordinates for the
# transformation to FK4.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk4 = [f"B{x:7.2f}" for x in np.random.uniform(1925.0, 1975.0, N)]
equinox_fk5 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
ra_fk4, dec_fk4 = [], []
ra_fk5, dec_fk5 = [], []
for i in range(N):
# Set up frames for AST
frame_fk4 = Ast.SkyFrame(
f"System=FK4-NO-E,Epoch={obstime[i]},Equinox={equinox_fk4[i]}"
)
frame_fk5 = Ast.SkyFrame(
f"System=FK5,Epoch={obstime[i]},Equinox={equinox_fk5[i]}"
)
# FK4 to FK5
frameset = frame_fk4.convert(frame_fk5)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk5.append(coords[0, 0])
dec_fk5.append(coords[1, 0])
# FK5 to FK4
frameset = frame_fk5.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk4", data=equinox_fk4))
t.add_column(Column(name="equinox_fk5", data=equinox_fk5))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk5", data=ra_fk5))
t.add_column(Column(name="dec_fk5", data=dec_fk5))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
    with open(os.path.join("data", fnout), "w") as f:
        f.write(
            f"# This file was generated with the {os.path.basename(__file__)} script, and"
            " the reference values were computed using AST\n"
        )
        t.write(f, format="ascii", delimiter=",")
def ref_galactic_fk4(fnout="galactic_fk4.csv"):
"""
    Accuracy tests for the Galactic to/from FK4 conversion, with arbitrary
    equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
    # Sample uniformly on the unit sphere. These will be either the Galactic
    # coordinates for the transformation to FK4, or the FK4 coordinates for
    # the transformation to Galactic.
lon = np.random.uniform(0.0, 360.0, N)
lat = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk4 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
lon_gal, lat_gal = [], []
ra_fk4, dec_fk4 = [], []
for i in range(N):
# Set up frames for AST
frame_gal = Ast.SkyFrame(f"System=Galactic,Epoch={obstime[i]}")
frame_fk4 = Ast.SkyFrame(
f"System=FK4,Epoch={obstime[i]},Equinox={equinox_fk4[i]}"
)
        # Galactic to FK4
frameset = frame_gal.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
        # FK4 to Galactic
frameset = frame_fk4.convert(frame_gal)
coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
lon_gal.append(coords[0, 0])
lat_gal.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk4", data=equinox_fk4))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="lon_in", data=lon))
t.add_column(Column(name="lat_in", data=lat))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
t.add_column(Column(name="lon_gal", data=lon_gal))
t.add_column(Column(name="lat_gal", data=lat_gal))
    with open(os.path.join("data", fnout), "w") as f:
        f.write(
            f"# This file was generated with the {os.path.basename(__file__)} script, and"
            " the reference values were computed using AST\n"
        )
        t.write(f, format="ascii", delimiter=",")
def ref_icrs_fk5(fnout="icrs_fk5.csv"):
"""
    Accuracy tests for the ICRS to/from FK5 conversion, with arbitrary
    equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the ICRS
# coordinates for the transformation to FK5, or the FK5 coordinates for the
# transformation to ICRS.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk5 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
ra_icrs, dec_icrs = [], []
ra_fk5, dec_fk5 = [], []
for i in range(N):
# Set up frames for AST
frame_icrs = Ast.SkyFrame(f"System=ICRS,Epoch={obstime[i]}")
frame_fk5 = Ast.SkyFrame(
f"System=FK5,Epoch={obstime[i]},Equinox={equinox_fk5[i]}"
)
# ICRS to FK5
frameset = frame_icrs.convert(frame_fk5)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk5.append(coords[0, 0])
dec_fk5.append(coords[1, 0])
# FK5 to ICRS
frameset = frame_fk5.convert(frame_icrs)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_icrs.append(coords[0, 0])
dec_icrs.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk5", data=equinox_fk5))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk5", data=ra_fk5))
t.add_column(Column(name="dec_fk5", data=dec_fk5))
t.add_column(Column(name="ra_icrs", data=ra_icrs))
t.add_column(Column(name="dec_icrs", data=dec_icrs))
    with open(os.path.join("data", fnout), "w") as f:
        f.write(
            f"# This file was generated with the {os.path.basename(__file__)} script, and"
            " the reference values were computed using AST\n"
        )
        t.write(f, format="ascii", delimiter=",")
if __name__ == "__main__":
ref_fk4_no_e_fk4()
ref_fk4_no_e_fk5()
ref_galactic_fk4()
ref_icrs_fk5()
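# The core AST pattern shared by all four generators above (an illustrative
# sketch; ``starlink.Ast`` is provided by the third-party starlink-pyast
# package):
#
#   frame_a = Ast.SkyFrame("System=FK4,Epoch=B1975.00,Equinox=B1950")
#   frame_b = Ast.SkyFrame("System=FK5,Equinox=J2000")
#   frameset = frame_a.convert(frame_b)
#   # tran() takes and returns radians, with shape (2, N)
#   lon_out, lat_out = np.degrees(
#       frameset.tran([[np.radians(10.0)], [np.radians(20.0)]])
#   )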
|
8917bd049682941f30087a6c9240e19766db2c7f0d5524a87e74a134c8cb238c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates.builtin_frames import FK4, Galactic
from astropy.table import Table
from astropy.time import Time
from astropy.utils.data import get_pkg_data_contents
# the number of tests to run
from . import N_ACCURACY_TESTS
TOLERANCE = 0.3 # arcseconds
def test_galactic_fk4():
lines = get_pkg_data_contents("data/galactic_fk4.csv").split("\n")
t = Table.read(lines, format="ascii", delimiter=",", guess=False)
if N_ACCURACY_TESTS >= len(t):
idxs = range(len(t))
else:
idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS)
diffarcsec1 = []
diffarcsec2 = []
for i in idxs:
# Extract row
r = t[int(i)] # int here is to get around a py 3.x astropy.table bug
# Galactic to FK4
c1 = Galactic(l=r["lon_in"] * u.deg, b=r["lat_in"] * u.deg)
c2 = c1.transform_to(FK4(equinox=Time(r["equinox_fk4"])))
# Find difference
diff = angular_separation(
c2.ra.radian,
c2.dec.radian,
np.radians(r["ra_fk4"]),
np.radians(r["dec_fk4"]),
)
diffarcsec1.append(np.degrees(diff) * 3600.0)
# FK4 to Galactic
c1 = FK4(
ra=r["lon_in"] * u.deg,
dec=r["lat_in"] * u.deg,
obstime=Time(r["obstime"]),
equinox=Time(r["equinox_fk4"]),
)
c2 = c1.transform_to(Galactic())
# Find difference
diff = angular_separation(
c2.l.radian, c2.b.radian, np.radians(r["lon_gal"]), np.radians(r["lat_gal"])
)
diffarcsec2.append(np.degrees(diff) * 3600.0)
np.testing.assert_array_less(diffarcsec1, TOLERANCE)
np.testing.assert_array_less(diffarcsec2, TOLERANCE)
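# Note on angular_separation as used above (illustrative): it takes the two
# longitude/latitude pairs in radians (or as angle Quantities) and returns the
# on-sky separation in the same angular units, e.g.
#   angular_separation(0.0, 0.0, np.radians(1.0), 0.0)  # ~ np.radians(1.0)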
|
0523897b65dac53eeccaa37224f590966dc0f11298c0a73723990482e7234d48 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates.builtin_frames import FK5, ICRS
from astropy.table import Table
from astropy.time import Time
from astropy.utils.data import get_pkg_data_contents
# the number of tests to run
from . import N_ACCURACY_TESTS
TOLERANCE = 0.03 # arcseconds
def test_icrs_fk5():
lines = get_pkg_data_contents("data/icrs_fk5.csv").split("\n")
t = Table.read(lines, format="ascii", delimiter=",", guess=False)
if N_ACCURACY_TESTS >= len(t):
idxs = range(len(t))
else:
idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS)
diffarcsec1 = []
diffarcsec2 = []
for i in idxs:
# Extract row
r = t[int(i)] # int here is to get around a py 3.x astropy.table bug
# ICRS to FK5
c1 = ICRS(ra=r["ra_in"] * u.deg, dec=r["dec_in"] * u.deg)
c2 = c1.transform_to(FK5(equinox=Time(r["equinox_fk5"])))
# Find difference
diff = angular_separation(
c2.ra.radian,
c2.dec.radian,
np.radians(r["ra_fk5"]),
np.radians(r["dec_fk5"]),
)
diffarcsec1.append(np.degrees(diff) * 3600.0)
# FK5 to ICRS
c1 = FK5(
ra=r["ra_in"] * u.deg,
dec=r["dec_in"] * u.deg,
equinox=Time(r["equinox_fk5"]),
)
c2 = c1.transform_to(ICRS())
# Find difference
diff = angular_separation(
c2.ra.radian,
c2.dec.radian,
np.radians(r["ra_icrs"]),
np.radians(r["dec_icrs"]),
)
diffarcsec2.append(np.degrees(diff) * 3600.0)
np.testing.assert_array_less(diffarcsec1, TOLERANCE)
np.testing.assert_array_less(diffarcsec2, TOLERANCE)
|
5c31f60b85b896cb187916d89b7c15818c0b57a00ca1462b39a2773a1b7b01f0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates.builtin_frames import FK5, FK4NoETerms
from astropy.table import Table
from astropy.time import Time
from astropy.utils.data import get_pkg_data_contents
# the number of tests to run
from . import N_ACCURACY_TESTS
TOLERANCE = 0.03 # arcseconds
def test_fk4_no_e_fk5():
lines = get_pkg_data_contents("data/fk4_no_e_fk5.csv").split("\n")
t = Table.read(lines, format="ascii", delimiter=",", guess=False)
if N_ACCURACY_TESTS >= len(t):
idxs = range(len(t))
else:
idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS)
diffarcsec1 = []
diffarcsec2 = []
for i in idxs:
# Extract row
r = t[int(i)] # int here is to get around a py 3.x astropy.table bug
# FK4NoETerms to FK5
c1 = FK4NoETerms(
ra=r["ra_in"] * u.deg,
dec=r["dec_in"] * u.deg,
obstime=Time(r["obstime"]),
equinox=Time(r["equinox_fk4"]),
)
c2 = c1.transform_to(FK5(equinox=Time(r["equinox_fk5"])))
# Find difference
diff = angular_separation(
c2.ra.radian,
c2.dec.radian,
np.radians(r["ra_fk5"]),
np.radians(r["dec_fk5"]),
)
diffarcsec1.append(np.degrees(diff) * 3600.0)
# FK5 to FK4NoETerms
c1 = FK5(
ra=r["ra_in"] * u.deg,
dec=r["dec_in"] * u.deg,
equinox=Time(r["equinox_fk5"]),
)
fk4neframe = FK4NoETerms(
obstime=Time(r["obstime"]), equinox=Time(r["equinox_fk4"])
)
c2 = c1.transform_to(fk4neframe)
# Find difference
diff = angular_separation(
c2.ra.radian,
c2.dec.radian,
np.radians(r["ra_fk4"]),
np.radians(r["dec_fk4"]),
)
diffarcsec2.append(np.degrees(diff) * 3600.0)
np.testing.assert_array_less(diffarcsec1, TOLERANCE)
np.testing.assert_array_less(diffarcsec2, TOLERANCE)
|
94fe2ea0eb14b0af755d4d0748c49242f4ba5889a418be5c9d6f8d09e2f2f95c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates.builtin_frames import FK4, FK4NoETerms
from astropy.table import Table
from astropy.time import Time
from astropy.utils.data import get_pkg_data_contents
# the number of tests to run
from . import N_ACCURACY_TESTS
# It looks as though SLALIB, which AST relies on, assumes a simplified version
# of the e-terms correction, so we have to up the tolerance a bit to get things
# to agree.
TOLERANCE = 1.0e-5 # arcseconds
def test_fk4_no_e_fk4():
lines = get_pkg_data_contents("data/fk4_no_e_fk4.csv").split("\n")
t = Table.read(lines, format="ascii", delimiter=",", guess=False)
if N_ACCURACY_TESTS >= len(t):
idxs = range(len(t))
else:
idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS)
diffarcsec1 = []
diffarcsec2 = []
for i in idxs:
# Extract row
r = t[int(i)] # int here is to get around a py 3.x astropy.table bug
# FK4 to FK4NoETerms
c1 = FK4(
ra=r["ra_in"] * u.deg, dec=r["dec_in"] * u.deg, obstime=Time(r["obstime"])
)
c2 = c1.transform_to(FK4NoETerms())
# Find difference
diff = angular_separation(
c2.ra.radian,
c2.dec.radian,
np.radians(r["ra_fk4ne"]),
np.radians(r["dec_fk4ne"]),
)
diffarcsec1.append(np.degrees(diff) * 3600.0)
# FK4NoETerms to FK4
c1 = FK4NoETerms(
ra=r["ra_in"] * u.deg, dec=r["dec_in"] * u.deg, obstime=Time(r["obstime"])
)
c2 = c1.transform_to(FK4())
# Find difference
diff = angular_separation(
c2.ra.radian,
c2.dec.radian,
np.radians(r["ra_fk4"]),
np.radians(r["dec_fk4"]),
)
diffarcsec2.append(np.degrees(diff) * 3600.0)
np.testing.assert_array_less(diffarcsec1, TOLERANCE)
np.testing.assert_array_less(diffarcsec2, TOLERANCE)
|
09c038984679f0952d2d229ace292f5c72b3ca82e8b28d94f255d5128baa2969 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Accuracy tests for Ecliptic coordinate systems.
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.constants import R_earth, R_sun
from astropy.coordinates import SkyCoord
from astropy.coordinates.builtin_frames import (
FK5,
GCRS,
ICRS,
BarycentricMeanEcliptic,
BarycentricTrueEcliptic,
CustomBarycentricEcliptic,
GeocentricMeanEcliptic,
GeocentricTrueEcliptic,
HeliocentricEclipticIAU76,
HeliocentricMeanEcliptic,
HeliocentricTrueEcliptic,
)
from astropy.coordinates.solar_system import get_body_barycentric_posvel
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
def test_against_pytpm_doc_example():
"""
Check that Astropy's Ecliptic systems give answers consistent with pyTPM
Currently this is only testing against the example given in the pytpm docs
"""
fk5_in = SkyCoord("12h22m54.899s", "15d49m20.57s", frame=FK5(equinox="J2000"))
pytpm_out = BarycentricMeanEcliptic(
lon=178.78256462 * u.deg, lat=16.7597002513 * u.deg, equinox="J2000"
)
astropy_out = fk5_in.transform_to(pytpm_out)
assert pytpm_out.separation(astropy_out) < (1 * u.arcsec)
def test_ecliptic_heliobary():
"""
Check that the ecliptic transformations for heliocentric and barycentric
at least more or less make sense
"""
icrs = ICRS(1 * u.deg, 2 * u.deg, distance=1.5 * R_sun)
bary = icrs.transform_to(BarycentricMeanEcliptic())
helio = icrs.transform_to(HeliocentricMeanEcliptic())
# make sure there's a sizable distance shift - in 3d hundreds of km, but
# this is 1D so we allow it to be somewhat smaller
assert np.abs(bary.distance - helio.distance) > 1 * u.km
# now make something that's got the location of helio but in bary's frame.
# this is a convenience to allow `separation` to work as expected
helio_in_bary_frame = bary.realize_frame(helio.cartesian)
assert bary.separation(helio_in_bary_frame) > 1 * u.arcmin
@pytest.mark.parametrize(
("trueframe", "meanframe"),
[
(BarycentricTrueEcliptic, BarycentricMeanEcliptic),
(HeliocentricTrueEcliptic, HeliocentricMeanEcliptic),
(GeocentricTrueEcliptic, GeocentricMeanEcliptic),
(HeliocentricEclipticIAU76, HeliocentricMeanEcliptic),
],
)
def test_ecliptic_roundtrips(trueframe, meanframe):
"""
Check that the various ecliptic transformations at least roundtrip
"""
icrs = ICRS(1 * u.deg, 2 * u.deg, distance=1.5 * R_sun)
truecoo = icrs.transform_to(trueframe())
meancoo = truecoo.transform_to(meanframe())
truecoo2 = meancoo.transform_to(trueframe())
assert not quantity_allclose(truecoo.cartesian.xyz, meancoo.cartesian.xyz)
assert quantity_allclose(truecoo.cartesian.xyz, truecoo2.cartesian.xyz)
@pytest.mark.parametrize(
("trueframe", "meanframe"),
[
(BarycentricTrueEcliptic, BarycentricMeanEcliptic),
(HeliocentricTrueEcliptic, HeliocentricMeanEcliptic),
(GeocentricTrueEcliptic, GeocentricMeanEcliptic),
],
)
def test_ecliptic_true_mean_preserve_latitude(trueframe, meanframe):
"""
Check that the ecliptic true/mean transformations preserve latitude
"""
truecoo = trueframe(90 * u.deg, 0 * u.deg, distance=1 * u.AU)
meancoo = truecoo.transform_to(meanframe())
assert not quantity_allclose(truecoo.lon, meancoo.lon)
assert quantity_allclose(truecoo.lat, meancoo.lat, atol=1e-10 * u.arcsec)
@pytest.mark.parametrize(
"frame",
[HeliocentricMeanEcliptic, HeliocentricTrueEcliptic, HeliocentricEclipticIAU76],
)
def test_helioecliptic_induced_velocity(frame):
# Create a coordinate with zero speed in ICRS
time = Time("2021-01-01")
icrs = ICRS(
ra=1 * u.deg,
dec=2 * u.deg,
distance=3 * u.AU,
pm_ra_cosdec=0 * u.deg / u.s,
pm_dec=0 * u.deg / u.s,
radial_velocity=0 * u.m / u.s,
)
# Transforming to a helioecliptic frame should give an induced speed equal to the Sun's speed
transformed = icrs.transform_to(frame(obstime=time))
_, vel = get_body_barycentric_posvel("sun", time)
assert quantity_allclose(transformed.velocity.norm(), vel.norm())
# Transforming back to ICRS should get back to zero speed
back = transformed.transform_to(ICRS())
assert quantity_allclose(
back.velocity.norm(), 0 * u.m / u.s, atol=1e-10 * u.m / u.s
)
def test_ecl_geo():
"""
Check that the geocentric version at least gets well away from GCRS. For a
true "accuracy" test we need a comparison dataset that is similar to the
geocentric/GCRS comparison we want to do here. Contributions welcome!
"""
gcrs = GCRS(10 * u.deg, 20 * u.deg, distance=1.5 * R_earth)
gecl = gcrs.transform_to(GeocentricMeanEcliptic())
assert quantity_allclose(gecl.distance, gcrs.distance)
def test_arraytransforms():
"""
Test that transforms to/from ecliptic coordinates work on array coordinates
(not testing for accuracy.)
"""
ra = np.ones((4,), dtype=float) * u.deg
dec = 2 * np.ones((4,), dtype=float) * u.deg
distance = np.ones((4,), dtype=float) * u.au
test_icrs = ICRS(ra=ra, dec=dec, distance=distance)
test_gcrs = GCRS(test_icrs.data)
bary_arr = test_icrs.transform_to(BarycentricMeanEcliptic())
assert bary_arr.shape == ra.shape
helio_arr = test_icrs.transform_to(HeliocentricMeanEcliptic())
assert helio_arr.shape == ra.shape
geo_arr = test_gcrs.transform_to(GeocentricMeanEcliptic())
assert geo_arr.shape == ra.shape
# now check that we also can go back the other way without shape problems
bary_icrs = bary_arr.transform_to(ICRS())
assert bary_icrs.shape == test_icrs.shape
helio_icrs = helio_arr.transform_to(ICRS())
assert helio_icrs.shape == test_icrs.shape
geo_gcrs = geo_arr.transform_to(GCRS())
assert geo_gcrs.shape == test_gcrs.shape
def test_roundtrip_scalar():
icrs = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.au)
gcrs = GCRS(icrs.cartesian)
bary = icrs.transform_to(BarycentricMeanEcliptic())
helio = icrs.transform_to(HeliocentricMeanEcliptic())
geo = gcrs.transform_to(GeocentricMeanEcliptic())
bary_icrs = bary.transform_to(ICRS())
helio_icrs = helio.transform_to(ICRS())
geo_gcrs = geo.transform_to(GCRS())
assert quantity_allclose(bary_icrs.cartesian.xyz, icrs.cartesian.xyz)
assert quantity_allclose(helio_icrs.cartesian.xyz, icrs.cartesian.xyz)
assert quantity_allclose(geo_gcrs.cartesian.xyz, gcrs.cartesian.xyz)
@pytest.mark.parametrize(
"frame",
[
HeliocentricMeanEcliptic,
HeliocentricTrueEcliptic,
GeocentricMeanEcliptic,
GeocentricTrueEcliptic,
HeliocentricEclipticIAU76,
],
)
def test_loopback_obstime(frame):
# Test that the loopback properly handles a change in obstime
from_coo = frame(1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-01-01")
to_frame = frame(obstime="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not quantity_allclose(explicit_coo.lon, from_coo.lon, rtol=1e-10)
assert not quantity_allclose(explicit_coo.lat, from_coo.lat, rtol=1e-10)
assert not quantity_allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert quantity_allclose(explicit_coo.lon, implicit_coo.lon, rtol=1e-10)
assert quantity_allclose(explicit_coo.lat, implicit_coo.lat, rtol=1e-10)
assert quantity_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
@pytest.mark.parametrize(
"frame",
[
BarycentricMeanEcliptic,
BarycentricTrueEcliptic,
HeliocentricMeanEcliptic,
HeliocentricTrueEcliptic,
GeocentricMeanEcliptic,
GeocentricTrueEcliptic,
],
)
def test_loopback_equinox(frame):
# Test that the loopback properly handles a change in equinox
from_coo = frame(1 * u.deg, 2 * u.deg, 3 * u.AU, equinox="2001-01-01")
to_frame = frame(equinox="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the lon/lat but not the distance
assert not quantity_allclose(explicit_coo.lon, from_coo.lon, rtol=1e-10)
assert not quantity_allclose(explicit_coo.lat, from_coo.lat, rtol=1e-10)
assert quantity_allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert quantity_allclose(explicit_coo.lon, implicit_coo.lon, rtol=1e-10)
assert quantity_allclose(explicit_coo.lat, implicit_coo.lat, rtol=1e-10)
assert quantity_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
def test_loopback_obliquity():
# Test that the loopback properly handles a change in obliquity
from_coo = CustomBarycentricEcliptic(
1 * u.deg, 2 * u.deg, 3 * u.AU, obliquity=84000 * u.arcsec
)
to_frame = CustomBarycentricEcliptic(obliquity=85000 * u.arcsec)
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the lon/lat but not the distance
assert not quantity_allclose(explicit_coo.lon, from_coo.lon, rtol=1e-10)
assert not quantity_allclose(explicit_coo.lat, from_coo.lat, rtol=1e-10)
assert quantity_allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert quantity_allclose(explicit_coo.lon, implicit_coo.lon, rtol=1e-10)
assert quantity_allclose(explicit_coo.lat, implicit_coo.lat, rtol=1e-10)
assert quantity_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
|
88a9fcee8908f4c8e78c63725a73b91112a8c0d7ea94192cc4daa7577da44346 | # Script to generate random targets, observatory locations, and times, and
# run these using the Starlink rv command to generate reference values for the
# velocity frame corrections. This requires that Starlink is installed and that
# the rv command is in your PATH. More information about Starlink can be found
# at http://starlink.eao.hawaii.edu/starlink
if __name__ == "__main__":
from random import choice
from subprocess import check_output
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord
from astropy.table import QTable
from astropy.time import Time
np.random.seed(12345)
N = 100
tab = QTable()
target_lon = np.random.uniform(0, 360, N) * u.deg
target_lat = np.degrees(np.arcsin(np.random.uniform(-1, 1, N))) * u.deg
tab["target"] = SkyCoord(target_lon, target_lat, frame="fk5")
tab["obstime"] = Time(
np.random.uniform(Time("1997-01-01").mjd, Time("2017-12-31").mjd, N),
format="mjd",
scale="utc",
)
tab["obslon"] = Angle(np.random.uniform(-180, 180, N) * u.deg)
tab["obslat"] = Angle(np.arcsin(np.random.uniform(-1, 1, N)) * u.deg)
tab["geocent"] = 0.0
tab["heliocent"] = 0.0
tab["lsrk"] = 0.0
tab["lsrd"] = 0.0
tab["galactoc"] = 0.0
tab["localgrp"] = 0.0
for row in tab:
# Produce input file for rv command
with open("rv.input", "w") as f:
f.write(
f"{row['obslon'].to_string('deg', sep=' ')}"
f" {row['obslat'].to_string('deg', sep=' ')}\n"
)
f.write(
f"{row['obstime'].datetime.year} {row['obstime'].datetime.month}"
f" {row['obstime'].datetime.day} 1\n"
)
f.write(row["target"].to_string("hmsdms", sep=" ") + " J2000\n")
f.write("END\n")
# Run Starlink rv command
check_output(["rv", "rv.input"])
# Parse values from output file
lis_lines = []
started = False
for lis_line in open("rv.lis"):
if started and lis_line.strip() != "":
lis_lines.append(lis_line.strip())
elif "LOCAL GROUP" in lis_line:
started = True
# Some sources are not observable at the specified time and therefore don't
# have entries in the rv output file
if len(lis_lines) == 0:
continue
# If there are lines, we pick one at random. Note that we can't get rv to
# run at the exact time we specified in the input, so we will re-parse the
# actual date/time used and replace it in the table
lis_line = choice(lis_lines)
# The column for 'SUN' has an entry also for the light travel time, which
# we want to ignore. It sometimes includes '(' followed by a space which
# can cause issues with splitting, hence why we get rid of the space.
lis_line = lis_line.replace("( ", "(").replace("( ", "(")
(
year,
month,
day,
time,
zd,
row["geocent"],
row["heliocent"],
_,
row["lsrk"],
row["lsrd"],
row["galactoc"],
row["localgrp"],
) = lis_line.split()
row["obstime"] = Time(
f"{year}-{month}-{day}T{time}:00", format="isot", scale="utc"
)
# We sampled 100 coordinates above since some may not have results - we now
# truncate to 50 sources since this is sufficient.
tab[:50].write("reference_rv.ecsv", format="ascii.ecsv")
|
9f9ee24c1a02f5daf314614509fe616420840e0a5f4ff64c7fea05a557e44d18 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for AltAz to ICRS coordinate transformations.
We use "known good" examples computed with other coordinate libraries.
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import Angle, EarthLocation, SkyCoord
from astropy.coordinates.builtin_frames import AltAz
from astropy.time import Time
def test_against_hor2eq():
"""Check that Astropy gives consistent results with an IDL hor2eq example.
See : http://idlastro.gsfc.nasa.gov/ftp/pro/astro/hor2eq.pro
Test is against these run outputs, run at 2000-01-01T12:00:00::
# NORMAL ATMOSPHERE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=781.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 53 40 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 30.1 (hh:mm:ss)
Ra, Dec: 07 36 55.6 +15 25 02 (Apparent Coords)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000.0000)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000)
IDL> print, ra, dec
114.23004 15.418818
# NO PRESSURE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=0.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 54 41 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 26.4 (hh:mm:ss)
Ra, Dec: 07 36 59.3 +15 25 31 (Apparent Coords)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000.0000)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000)
IDL> print, ra, dec
114.24554 15.427022
"""
# Observatory position for `kpno` from here:
# http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
location = EarthLocation(
lon=Angle("-111d36.0m"), lat=Angle("31d57.8m"), height=2120.0 * u.m
)
obstime = Time(2451545.0, format="jd", scale="ut1")
altaz_frame = AltAz(
obstime=obstime,
location=location,
temperature=0 * u.deg_C,
pressure=0.781 * u.bar,
)
altaz_frame_noatm = AltAz(
obstime=obstime,
location=location,
temperature=0 * u.deg_C,
pressure=0.0 * u.bar,
)
altaz = SkyCoord("264d55m06s 37d54m41s", frame=altaz_frame)
altaz_noatm = SkyCoord("264d55m06s 37d54m41s", frame=altaz_frame_noatm)
radec_frame = "icrs"
radec_actual = altaz.transform_to(radec_frame)
radec_actual_noatm = altaz_noatm.transform_to(radec_frame)
radec_expected = SkyCoord("07h36m55.2s +15d25m08s", frame=radec_frame)
distance = radec_actual.separation(radec_expected).to("arcsec")
# this comes from running the example hor2eq but with the pressure set to 0
radec_expected_noatm = SkyCoord("07h36m58.9s +15d25m37s", frame=radec_frame)
distance_noatm = radec_actual_noatm.separation(radec_expected_noatm).to("arcsec")
# The baseline difference is ~2.3 arcsec with one atm of pressure. The
# difference is mainly due to the somewhat different atmospheric model that
# hor2eq assumes. This is confirmed by the second test which has the
# atmosphere "off" - the residual difference is small enough to be embedded
# in the assumptions about "J2000" or rounding errors.
assert distance < 5 * u.arcsec
assert distance_noatm < 0.4 * u.arcsec
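    # Note (illustrative): hor2eq's ``pres`` argument is evidently in
    # millibar, so ``pres=781.0`` in the IDL calls corresponds to the
    # ``pressure=0.781 * u.bar`` used for the frames above.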
def run_pyephem():
"""Test run of pyephem, just in case the numbers below need to be reproduced."""
import ephem
observer = ephem.Observer()
observer.lon = -1 * np.radians(109 + 24 / 60.0 + 53.1 / 60**2)
observer.lat = np.radians(33 + 41 / 60.0 + 46.0 / 60.0**2)
observer.elevation = 300
observer.date = 2455822.868055556 - ephem.julian_date(0)
ra, dec = observer.radec_of(np.radians(6.8927), np.radians(60.7665))
print(f"EPHEM: {observer.date}: {np.degrees(ra)}, {np.degrees(dec)}")
# 2021-04-06: EPHEM: 2011/9/18 08:50:00: 27.107480889479397, 62.512687777362046
# NOTE: independent of elevation.
def test_against_pyephem():
"""Check that Astropy gives consistent results with one PyEphem example.
PyEphem: https://rhodesmill.org/pyephem/
See example input and output here:
https://gist.github.com/zonca/1672906
https://github.com/phn/pytpm/issues/2#issuecomment-3698679
"""
obstime = Time("2011-09-18 08:50:00")
location = EarthLocation(
lon=Angle("-109d24m53.1s"), lat=Angle("33d41m46.0s"), height=300.0 * u.m
)
# We are using the default pressure and temperature in PyEphem
# relative_humidity = ?
# obswl = ?
altaz_frame = AltAz(
obstime=obstime,
location=location,
temperature=15 * u.deg_C,
pressure=1.010 * u.bar,
)
altaz = SkyCoord("6.8927d +60.7665d", frame=altaz_frame)
radec_actual = altaz.transform_to("icrs")
radec_expected = SkyCoord("27.107480889479397d +62.512687777362046d", frame="icrs")
distance_ephem = radec_actual.separation(radec_expected).to("arcsec")
# 2021-04-06: 2.42 arcsec
assert distance_ephem < 3 * u.arcsec
# Add assert on current Astropy result so that we notice if something changes
radec_expected = SkyCoord("27.10602683d +62.51275391d", frame="icrs")
distance_astropy = radec_actual.separation(radec_expected).to("arcsec")
# 2021-04-06: 5e-6 arcsec (erfa 1.7.2 vs erfa 1.7.1).
assert distance_astropy < 0.1 * u.arcsec
def test_against_jpl_horizons():
"""Check that Astropy gives consistent results with the JPL Horizons example.
The input parameters and reference results are taken from this page:
(from the first row of the Results table at the bottom of that page)
http://ssd.jpl.nasa.gov/?horizons_tutorial
"""
obstime = Time("1998-07-28 03:00")
location = EarthLocation(
lon=Angle("248.405300d"), lat=Angle("31.9585d"), height=2.06 * u.km
)
# No atmosphere
altaz_frame = AltAz(obstime=obstime, location=location)
altaz = SkyCoord("143.2970d 2.6223d", frame=altaz_frame)
radec_actual = altaz.transform_to("icrs")
radec_expected = SkyCoord("19h24m55.01s -40d56m28.9s", frame="icrs")
distance = radec_actual.separation(radec_expected).to("arcsec")
# 2021-04-06: astropy 4.2.1, erfa 1.7.1: 0.23919259 arcsec
# 2021-04-06: astropy 4.3dev, erfa 1.7.2: 0.2391959 arcsec
assert distance < 1 * u.arcsec
@pytest.mark.xfail(reason="Current output is completely incorrect")
def test_fk5_equinox_and_epoch_j2000_0_to_topocentric_observed():
"""
http://phn.github.io/pytpm/conversions.html#fk5-equinox-and-epoch-j2000-0-to-topocentric-observed
"""
# Observatory position for `kpno` from here:
# http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
location = EarthLocation(
lon=Angle("-111.598333d"), lat=Angle("31.956389d"), height=2093.093 * u.m
) # TODO: height correct?
obstime = Time("2010-01-01 12:00:00")
# relative_humidity = ?
# obswl = ?
altaz_frame = AltAz(
obstime=obstime,
location=location,
temperature=0 * u.deg_C,
pressure=0.781 * u.bar,
)
radec = SkyCoord("12h22m54.899s 15d49m20.57s", frame="fk5")
altaz_actual = radec.transform_to(altaz_frame)
altaz_expected = SkyCoord("264d55m06s 37d54m41s", frame="altaz")
# altaz_expected = SkyCoord('343.586827647d 15.7683070508d', frame='altaz')
# altaz_expected = SkyCoord('133.498195532d 22.0162383595d', frame='altaz')
distance = altaz_actual.separation(altaz_expected)
# print(altaz_actual)
# print(altaz_expected)
# print(distance)
"""TODO: Current output is completely incorrect ... xfailing this test for now.
<SkyCoord (AltAz: obstime=2010-01-01 12:00:00.000, location=(-1994497.7199061865, -5037954.447348028, 3357437.2294832403) m, pressure=781.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron):00:00.000, location=(-1994497.7199061865, -5037954.447348028, 3357437.2294832403) m, pressure=781.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=133.4869896371561 deg, alt=67.97857990957701 deg>
<SkyCoord (AltAz: obstime=None, location=None, pressure=0.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=264.91833333333335 deg, alt=37.91138888888889 deg>
68d02m45.732s
"""
assert distance < 1 * u.arcsec
|
86953b50128903247995f23bc62fda1084f8d4d6649a7d5ae14c3a701435d43a | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import errno
import gzip
import http.client
import io
import mmap
import operator
import os
import re
import sys
import tempfile
import warnings
import zipfile
from functools import reduce
import numpy as np
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.data import (
_is_url,
_requires_fsspec,
download_file,
get_readable_fileobj,
)
from astropy.utils.decorators import classproperty
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from .util import (
_array_from_file,
_array_to_file,
_write_string,
fileobj_closed,
fileobj_mode,
fileobj_name,
isfile,
isreadable,
iswritable,
path_like,
)
if HAS_BZ2:
import bz2
# Maps astropy.io.fits-specific file mode names to the appropriate file
# modes to use for the underlying raw files.
IO_FITS_MODES = {
"readonly": "rb",
"copyonwrite": "rb",
"update": "rb+",
"append": "ab+",
"ostream": "wb",
"denywrite": "rb",
}
# Maps OS-level file modes to the appropriate astropy.io.fits specific mode
# to use when given file objects but no mode specified; obviously in
# IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite'
# both require the file to be opened in 'rb' mode. But 'readonly' is the
# default behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
"rb": "readonly",
"rb+": "update",
"wb": "ostream",
"wb+": "update",
"ab": "ostream",
"ab+": "append",
}
# A match indicates the file was opened in text mode, which is not allowed
TEXT_RE = re.compile(r"^[rwa]((t?\+?)|(\+?t?))$")
# readonly actually uses copyonwrite for mmap so that readonly without mmap and
# with mmap still have the same behavior with regard to updating the array. To
# get a truly readonly mmap use denywrite
# the name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {
"readonly": mmap.ACCESS_COPY,
"copyonwrite": mmap.ACCESS_COPY,
"update": mmap.ACCESS_WRITE,
"append": mmap.ACCESS_COPY,
"denywrite": mmap.ACCESS_READ,
}
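# For reference (not part of the mapping above): mmap.ACCESS_COPY gives a
# private copy-on-write view whose changes are never written back to the file,
# mmap.ACCESS_WRITE propagates changes back to the underlying file, and
# mmap.ACCESS_READ rejects writes outright.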
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
GZIP_MAGIC = b"\x1f\x8b\x08"
PKZIP_MAGIC = b"\x50\x4b\x03\x04"
BZIP2_MAGIC = b"\x42\x5a"
def _is_bz2file(fileobj):
if HAS_BZ2:
return isinstance(fileobj, bz2.BZ2File)
else:
return False
def _normalize_fits_mode(mode):
if mode is not None and mode not in IO_FITS_MODES:
if TEXT_RE.match(mode):
raise ValueError(
"Text mode '{}' not supported: "
"files must be opened in binary mode".format(mode)
)
new_mode = FILE_MODES.get(mode)
if new_mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
mode = new_mode
return mode
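# Example behavior of _normalize_fits_mode (illustrative):
#   _normalize_fits_mode("rb")        -> "readonly"
#   _normalize_fits_mode("rb+")       -> "update"
#   _normalize_fits_mode("readonly")  -> "readonly"  (already normalized)
#   _normalize_fits_mode("rt")        -> ValueError (text mode not supported)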
class _File:
"""
Represents a FITS file on disk (or in some other file-like object).
"""
def __init__(
self,
fileobj=None,
mode=None,
memmap=None,
overwrite=False,
cache=True,
*,
use_fsspec=None,
fsspec_kwargs=None,
):
self.strict_memmap = bool(memmap)
memmap = True if memmap is None else memmap
self._file = None
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
self.compression = None
self.readonly = False
self.writeonly = False
# Should the object be closed on error: see
# https://github.com/astropy/astropy/issues/6168
self.close_on_error = False
# Holds mmap instance for files that use mmap
self._mmap = None
if fileobj is None:
self.simulateonly = True
return
else:
self.simulateonly = False
if isinstance(fileobj, os.PathLike):
fileobj = os.fspath(fileobj)
if mode is not None and mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
if isfile(fileobj):
objmode = _normalize_fits_mode(fileobj_mode(fileobj))
if mode is not None and mode != objmode:
raise ValueError(
"Requested FITS mode '{}' not compatible with open file "
"handle mode '{}'".format(mode, objmode)
)
mode = objmode
if mode is None:
mode = "readonly"
# Handle cloud-hosted files using the optional ``fsspec`` dependency
if (use_fsspec or _requires_fsspec(fileobj)) and mode != "ostream":
# Note: we don't use `get_readable_fileobj` as a context manager
# because io.fits takes care of closing files itself
fileobj = get_readable_fileobj(
fileobj,
encoding="binary",
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
close_files=False,
).__enter__()
# Handle raw URLs
if (
isinstance(fileobj, (str, bytes))
and mode not in ("ostream", "append", "update")
and _is_url(fileobj)
):
self.name = download_file(fileobj, cache=cache)
# Handle responses from URL requests that have already been opened
elif isinstance(fileobj, http.client.HTTPResponse):
if mode in ("ostream", "append", "update"):
raise ValueError(f"Mode {mode} not supported for HTTPResponse")
fileobj = io.BytesIO(fileobj.read())
else:
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
self.name = fileobj_name(fileobj)
self.mode = mode
        # Whether the underlying fileobj is a file-like object but not an
        # actual file object
self.file_like = False
# Initialize the internal self._file object
if isfile(fileobj):
self._open_fileobj(fileobj, mode, overwrite)
elif isinstance(fileobj, (str, bytes)):
self._open_filename(fileobj, mode, overwrite)
else:
self._open_filelike(fileobj, mode, overwrite)
self.fileobj_mode = fileobj_mode(self._file)
if isinstance(fileobj, gzip.GzipFile):
self.compression = "gzip"
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
self.compression = "zip"
elif _is_bz2file(fileobj):
self.compression = "bzip2"
if mode in ("readonly", "copyonwrite", "denywrite") or (
self.compression and mode == "update"
):
self.readonly = True
elif mode == "ostream" or (self.compression and mode == "append"):
self.writeonly = True
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if mode == "ostream" or self.compression or not hasattr(self._file, "seek"):
# For output stream start with a truncated file.
# For compressed files we can't really guess at the size
self.size = 0
else:
pos = self._file.tell()
self._file.seek(0, 2)
self.size = self._file.tell()
self._file.seek(pos)
if self.memmap:
if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._mmap_available:
# Test mmap.flush--see
# https://github.com/astropy/astropy/issues/968
self.memmap = False
def __repr__(self):
return f"<{self.__module__}.{self.__class__.__name__} {self._file}>"
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self._file)
def read(self, size=None):
if not hasattr(self._file, "read"):
raise EOFError
try:
return self._file.read(size)
except OSError:
# On some versions of Python, it appears, GzipFile will raise an
# OSError if you try to read past its end (as opposed to just
# returning '')
if self.compression == "gzip":
return ""
raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self._file, "read"):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError(f"size {size} not a multiple of {dtype}")
if isinstance(shape, int):
shape = (shape,)
if not (size or shape):
warnings.warn(
"No size or shape given to readarray(); assuming a shape of (1,)",
AstropyUserWarning,
)
shape = (1,)
if size and not shape:
shape = (size // dtype.itemsize,)
if size and shape:
actualsize = np.prod(shape) * dtype.itemsize
if actualsize > size:
raise ValueError(
"size {} is too few bytes for a {} array of {}".format(
size, shape, dtype
)
)
elif actualsize < size:
raise ValueError(
"size {} is too many bytes for a {} array of {}".format(
size, shape, dtype
)
)
filepos = self._file.tell()
try:
if self.memmap:
if self._mmap is None:
# Instantiate Memmap array of the file offset at 0 (so we
# can return slices of it to offset anywhere else into the
# file)
access_mode = MEMMAP_MODES[self.mode]
# For reasons unknown the file needs to point to (near)
# the beginning or end of the file. No idea how close to
# the beginning or end.
# If I had to guess there is some bug in the mmap module
                    # of CPython or perhaps in Microsoft's underlying code
# for generating the mmap.
self._file.seek(0, 0)
# This would also work:
# self._file.seek(0, 2) # moves to the end
try:
self._mmap = mmap.mmap(
self._file.fileno(), 0, access=access_mode, offset=0
)
except OSError as exc:
# NOTE: mode='readonly' results in the memory-mapping
# using the ACCESS_COPY mode in mmap so that users can
# modify arrays. However, on some systems, the OS raises
# a '[Errno 12] Cannot allocate memory' OSError if the
# address space is smaller than the file. The solution
# is to open the file in mode='denywrite', which at
# least allows the file to be opened even if the
# resulting arrays will be truly read-only.
if exc.errno == errno.ENOMEM and self.mode == "readonly":
warnings.warn(
"Could not memory map array with "
"mode='readonly', falling back to "
"mode='denywrite', which means that "
"the array will be read-only",
AstropyUserWarning,
)
self._mmap = mmap.mmap(
self._file.fileno(),
0,
access=MEMMAP_MODES["denywrite"],
offset=0,
)
else:
raise
return np.ndarray(
shape=shape, dtype=dtype, offset=offset, buffer=self._mmap
)
else:
count = reduce(operator.mul, shape)
self._file.seek(offset)
data = _array_from_file(self._file, dtype, count)
data.shape = shape
return data
finally:
# Make sure we leave the file in the position we found it; on
# some platforms (e.g. Windows) mmaping a file handle can also
# reset its file pointer.
# Also for Windows when using mmap seek() may return weird
# negative values, which is fixed by calling tell() before.
self._file.tell()
self._file.seek(filepos)
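    # Illustrative readarray call (a sketch, assuming ``f`` is a _File opened
    # for reading): skip the first 2880-byte FITS block and view the next
    # 100 x 100 float32 values, mmap-backed when memmap is enabled:
    #   data = f.readarray(offset=2880, dtype=np.float32, shape=(100, 100))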
def writable(self):
if self.readonly:
return False
return iswritable(self._file)
def write(self, string):
if self.simulateonly:
return
if hasattr(self._file, "write"):
_write_string(self._file, string)
def writearray(self, array):
"""
Similar to file.write(), but writes a numpy array instead of a string.
Also like file.write(), a flush() or close() may be needed before
the file on disk reflects the data written.
"""
if self.simulateonly:
return
if hasattr(self._file, "write"):
_array_to_file(array, self._file)
def flush(self):
if self.simulateonly:
return
if hasattr(self._file, "flush"):
self._file.flush()
def seek(self, offset, whence=0):
if not hasattr(self._file, "seek"):
return
self._file.seek(offset, whence)
pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn(
"File may have been truncated: actual file length "
"({}) is smaller than the expected size ({})".format(self.size, pos),
AstropyUserWarning,
)
def tell(self):
if self.simulateonly:
raise OSError
if not hasattr(self._file, "tell"):
raise EOFError
return self._file.tell()
def truncate(self, size=None):
if hasattr(self._file, "truncate"):
self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self._file, "close"):
self._file.close()
self._maybe_close_mmap()
        # Set self._mmap to None anyway since no new .data attributes can be
        # loaded after the file is closed
self._mmap = None
self.closed = True
self.close_on_error = False
def _maybe_close_mmap(self, refcount_delta=0):
"""
When mmap is in use these objects hold a reference to the mmap of the
file (so there is only one, shared by all HDUs that reference this
file).
This will close the mmap if there are no arrays referencing it.
"""
if self._mmap is not None and sys.getrefcount(self._mmap) == 2 + refcount_delta:
self._mmap.close()
self._mmap = None
def _overwrite_existing(self, overwrite, fileobj, closed):
"""Overwrite an existing file if ``overwrite`` is ``True``, otherwise
raise an OSError. The exact behavior of this method depends on the
_File object state and is only meant for use within the ``_open_*``
internal methods.
"""
# The file will be overwritten...
if (self.file_like and hasattr(fileobj, "len") and fileobj.len > 0) or (
os.path.exists(self.name) and os.path.getsize(self.name) != 0
):
if overwrite:
if self.file_like and hasattr(fileobj, "truncate"):
fileobj.truncate(0)
else:
if not closed:
fileobj.close()
os.remove(self.name)
else:
raise OSError(NOT_OVERWRITING_MSG.format(self.name))
def _try_read_compressed(self, obj_or_name, magic, mode, ext=""):
"""Attempt to determine if the given file is compressed"""
is_ostream = mode == "ostream"
if (is_ostream and ext == ".gz") or magic.startswith(GZIP_MAGIC):
if mode == "append":
raise OSError(
"'append' mode is not supported with gzip files."
"Use 'update' mode instead"
)
# Handle gzip files
kwargs = dict(mode=IO_FITS_MODES[mode])
if isinstance(obj_or_name, str):
kwargs["filename"] = obj_or_name
else:
kwargs["fileobj"] = obj_or_name
self._file = gzip.GzipFile(**kwargs)
self.compression = "gzip"
elif (is_ostream and ext == ".zip") or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
self.compression = "zip"
elif (is_ostream and ext == ".bz2") or magic.startswith(BZIP2_MAGIC):
# Handle bzip2 files
if mode in ["update", "append"]:
raise OSError(
"update and append modes are not supported with bzip2 files"
)
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
# bzip2 only supports 'w' and 'r' modes
bzip2_mode = "w" if is_ostream else "r"
self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode)
self.compression = "bzip2"
return self.compression is not None
def _open_fileobj(self, fileobj, mode, overwrite):
"""Open a FITS file from a file object (including compressed files)."""
closed = fileobj_closed(fileobj)
# FIXME: this variable was unused, check if it was useful
# fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode]
if mode == "ostream":
self._overwrite_existing(overwrite, fileobj, closed)
if not closed:
self._file = fileobj
elif isfile(fileobj):
self._file = open(self.name, IO_FITS_MODES[mode])
# Attempt to determine if the file represented by the open file object
# is compressed
try:
# We need to account for the possibility that the underlying file
# handle may have been opened with either 'ab' or 'ab+', which
# means that the current file position is at the end of the file.
if mode in ["ostream", "append"]:
self._file.seek(0)
magic = self._file.read(4)
# No matter whether the underlying file was opened with 'ab' or
# 'ab+', we need to return to the beginning of the file in order
# to properly process the FITS header (and handle the possibility
# of a compressed file).
self._file.seek(0)
except OSError:
return
self._try_read_compressed(fileobj, magic, mode)
def _open_filelike(self, fileobj, mode, overwrite):
"""Open a FITS file from a file-like object, i.e. one that has
read and/or write methods.
"""
self.file_like = True
self._file = fileobj
if fileobj_closed(fileobj):
raise OSError(
"Cannot read from/write to a closed file-like object ({!r}).".format(
fileobj
)
)
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
# We can bypass any additional checks at this point since now
# self._file points to the temp file extracted from the zip
return
# If there is not seek or tell methods then set the mode to
# output streaming.
if not hasattr(self._file, "seek") or not hasattr(self._file, "tell"):
self.mode = mode = "ostream"
if mode == "ostream":
self._overwrite_existing(overwrite, fileobj, False)
# Any "writeable" mode requires a write() method on the file object
if self.mode in ("update", "append", "ostream") and not hasattr(
self._file, "write"
):
raise OSError(
"File-like object does not have a 'write' "
"method, required for mode '{}'.".format(self.mode)
)
# Any mode except for 'ostream' requires readability
if self.mode != "ostream" and not hasattr(self._file, "read"):
raise OSError(
"File-like object does not have a 'read' "
"method, required for mode {!r}.".format(self.mode)
)
def _open_filename(self, filename, mode, overwrite):
"""Open a FITS file from a filename string."""
if mode == "ostream":
self._overwrite_existing(overwrite, None, True)
if os.path.exists(self.name):
with open(self.name, "rb") as f:
magic = f.read(4)
else:
magic = b""
ext = os.path.splitext(self.name)[1]
if not self._try_read_compressed(self.name, magic, mode, ext=ext):
self._file = open(self.name, IO_FITS_MODES[mode])
self.close_on_error = True
# Make certain we're back at the beginning of the file
# BZ2File does not support seek when the file is open for writing, but
# when opening a file for write, bz2.BZ2File always truncates anyway.
if not (_is_bz2file(self._file) and mode == "ostream"):
self._file.seek(0)
@classproperty(lazy=True)
def _mmap_available(cls):
"""Tests that mmap, and specifically mmap.flush works. This may
be the case on some uncommon platforms (see
https://github.com/astropy/astropy/issues/968).
If mmap.flush is found not to work, ``self.memmap = False`` is
set and a warning is issued.
"""
tmpfd, tmpname = tempfile.mkstemp()
try:
# Windows does not allow mappings on empty files
os.write(tmpfd, b" ")
os.fsync(tmpfd)
try:
mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
except OSError as exc:
warnings.warn(
"Failed to create mmap: {}; mmap use will be disabled".format(
str(exc)
),
AstropyUserWarning,
)
del exc
return False
try:
mm.flush()
except OSError:
warnings.warn(
"mmap.flush is unavailable on this platform; "
"using mmap in writeable mode will be disabled",
AstropyUserWarning,
)
return False
finally:
mm.close()
finally:
os.close(tmpfd)
os.remove(tmpname)
return True
def _open_zipfile(self, fileobj, mode):
"""Limited support for zipfile.ZipFile objects containing a single
        file. Allows reading only for now by extracting the file to a
tempfile.
"""
if mode in ("update", "append"):
raise OSError("Writing to zipped fits files is not currently supported")
if not isinstance(fileobj, zipfile.ZipFile):
zfile = zipfile.ZipFile(fileobj)
close = True
else:
zfile = fileobj
close = False
namelist = zfile.namelist()
if len(namelist) != 1:
raise OSError("Zip files with multiple members are not supported.")
self._file = tempfile.NamedTemporaryFile(suffix=".fits")
self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
# We just wrote the contents of the first file in the archive to a new
# temp file, which now serves as our underlying file object. So it's
# necessary to reset the position back to the beginning
self._file.seek(0)
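# Minimal usage sketch for _File (illustrative; this class is internal to
# astropy.io.fits): it supports the context-manager protocol, e.g.
#
#   with _File("example.fits", mode="readonly", memmap=True) as f:
#       first_block = f.read(2880)  # raw bytes of the first FITS block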
|
5efd00ff03e84dd1e757fabd4c4a72bb13404ac3e93d2a5cc1ae80c9677e796c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import warnings
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.io import registry as io_registry
from astropy.table import Column, MaskedColumn, Table, meta, serialize
from astropy.time import Time
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import BinTableHDU, GroupsHDU, HDUList, TableHDU
from . import append as fits_append
from .column import KEYWORD_NAMES, _fortran_to_python_format
from .convenience import table_to_hdu
from .hdu.hdulist import FITS_SIGNATURE
from .hdu.hdulist import fitsopen as fits_open
from .util import first
# Keywords to remove for all tables that are read in
REMOVE_KEYWORDS = [
"XTENSION",
"BITPIX",
"NAXIS",
"NAXIS1",
"NAXIS2",
"PCOUNT",
"GCOUNT",
"TFIELDS",
"THEAP",
]
# Column-specific keywords regex
COLUMN_KEYWORD_REGEXP = "(" + "|".join(KEYWORD_NAMES) + ")[0-9]+"
def is_column_keyword(keyword):
return re.match(COLUMN_KEYWORD_REGEXP, keyword) is not None
def is_fits(origin, filepath, fileobj, *args, **kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if fileobj is not None:
pos = fileobj.tell()
sig = fileobj.read(30)
fileobj.seek(pos)
return sig == FITS_SIGNATURE
elif filepath is not None:
if filepath.lower().endswith(
(".fits", ".fits.gz", ".fit", ".fit.gz", ".fts", ".fts.gz")
):
return True
    elif args and isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU)):
        # Guarding on ``args`` avoids an IndexError when no data object is
        # passed along with the call.
        return True
    else:
        return False
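# Illustrative sketch (a hypothetical helper, not part of this module): the
# identifier above is what lets ``Table.read`` infer the "fits" format via
# the unified I/O registry. Assuming a local file "data.fits" exists:
def _example_identify_fits(path="data.fits"):
    from astropy.io import registry as example_registry
    from astropy.table import Table
    # Returns the list of matching format names, e.g. ['fits'].
    return example_registry.identify_format("read", Table, path, None, [], {})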
def _decode_mixins(tbl):
"""Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into
the corresponding table with mixin columns (as appropriate).
"""
    # If available, read in the __serialized_columns__ meta info, which is
    # stored in FITS COMMENT cards between two sentinels.
try:
i0 = tbl.meta["comments"].index("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
i1 = tbl.meta["comments"].index("--END-ASTROPY-SERIALIZED-COLUMNS--")
except (ValueError, KeyError):
return tbl
# The YAML data are split into COMMENT cards, with lines longer than 70
# characters being split with a continuation character \ (backslash).
# Strip the backslashes and join together.
continuation_line = False
lines = []
for line in tbl.meta["comments"][i0 + 1 : i1]:
if continuation_line:
lines[-1] = lines[-1] + line[:70]
else:
lines.append(line[:70])
continuation_line = len(line) == 71
del tbl.meta["comments"][i0 : i1 + 1]
if not tbl.meta["comments"]:
del tbl.meta["comments"]
info = meta.get_header_from_yaml(lines)
# Add serialized column information to table meta for use in constructing mixins
tbl.meta["__serialized_columns__"] = info["meta"]["__serialized_columns__"]
# Use the `datatype` attribute info to update column attributes that are
# NOT already handled via standard FITS column keys (name, dtype, unit).
for col in info["datatype"]:
for attr in ["description", "meta"]:
if attr in col:
setattr(tbl[col["name"]].info, attr, col[attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
tbl = serialize._construct_mixins_from_columns(tbl)
return tbl
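# For reference, the COMMENT cards consumed above look roughly like this in a
# FITS header. The two sentinel strings are the literal values matched in
# _decode_mixins; the YAML content shown between them is illustrative only,
# and YAML lines longer than 70 characters end in a backslash and continue on
# the next card:
#
#     COMMENT --BEGIN-ASTROPY-SERIALIZED-COLUMNS--
#     COMMENT datatype:
#     COMMENT - {name: coord.ra, unit: deg, datatype: float64}
#     COMMENT meta:
#     COMMENT   __serialized_columns__:
#     COMMENT     coord: {__class__: astropy.coordinates.sky_coordinate.SkyC\
#     COMMENT oord, ...}
#     COMMENT --END-ASTROPY-SERIALIZED-COLUMNS--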
def read_table_fits(
input,
hdu=None,
astropy_native=False,
memmap=False,
character_as_bytes=True,
unit_parse_strict="warn",
mask_invalid=True,
):
"""
    Read a Table object from a FITS file.
If the ``astropy_native`` argument is ``True``, then input FITS columns
which are representations of an astropy core object will be converted to
that class and stored in the ``Table`` as "mixin columns". Currently this
is limited to FITS columns which adhere to the FITS Time standard, in which
case they will be converted to a `~astropy.time.Time` column in the output
table.
Parameters
----------
input : str or file-like or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
astropy_native : bool, optional
Read in FITS columns as native astropy objects where possible instead
of standard Table Column objects. Default is False.
memmap : bool, optional
Whether to use memory mapping, which accesses data on disk as needed. If
you are only accessing part of the data, this is often more efficient.
If you want to access all the values in the table, and you are able to
fit the table in memory, you may be better off leaving memory mapping
off. However, if your table would not fit in memory, you should set this
to `True`.
        When set to `True`, ``mask_invalid`` is forced to `False`, since
        masking would require loading the full data array into memory.
character_as_bytes : bool, optional
If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
and are converted on-the-fly to unicode strings when accessing
individual elements. If you need to use Numpy unicode arrays (dtype
``U``) internally, you should set this to `False`, but note that this
will use more memory. If set to `False`, string columns will not be
memory-mapped even if ``memmap`` is `True`.
unit_parse_strict : str, optional
Behaviour when encountering invalid column units in the FITS header.
Default is "warn", which will emit a ``UnitsWarning`` and create a
:class:`~astropy.units.core.UnrecognizedUnit`.
Values are the ones allowed by the ``parse_strict`` argument of
:class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.
mask_invalid : bool, optional
By default the code masks NaNs in float columns and empty strings in
string columns. Set this parameter to `False` to avoid the performance
penalty of doing this masking step. The masking is always deactivated
when using ``memmap=True`` (see above).
"""
if isinstance(input, HDUList):
# Parse all table objects
tables = dict()
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn(
"hdu= was not specified but multiple tables"
" are present, reading in first available"
f" table (hdu={first(tables)})",
AstropyUserWarning,
)
hdu = first(tables)
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError(f"No table found in hdu={hdu}")
elif len(tables) == 1:
if hdu is not None:
msg = None
try:
hdi = input.index_of(hdu)
except KeyError:
msg = f"Specified hdu={hdu} not found"
else:
if hdi >= len(input):
msg = f"Specified hdu={hdu} not found"
elif hdi not in tables:
msg = f"No table found in specified hdu={hdu}"
if msg is not None:
warnings.warn(
f"{msg}, reading in first available table "
f"(hdu={first(tables)}) instead. This will"
" result in an error in future versions!",
AstropyDeprecationWarning,
)
table = tables[first(tables)]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
if memmap:
            # Using memmap is not compatible with masking invalid values by
            # default, so we deactivate the masking.
mask_invalid = False
hdulist = fits_open(input, character_as_bytes=character_as_bytes, memmap=memmap)
try:
return read_table_fits(
hdulist,
hdu=hdu,
astropy_native=astropy_native,
unit_parse_strict=unit_parse_strict,
mask_invalid=mask_invalid,
)
finally:
hdulist.close()
# In the loop below we access the data using data[col.name] rather than
# col.array to make sure that the data is scaled correctly if needed.
data = table.data
columns = []
for col in data.columns:
        # Check if column is masked. Here, we make a guess based on the
        # presence of FITS mask values. For integer columns, this is simply
        # the TNULL value from the header; for float and complex, the presence
        # of NaN; and for string, empty strings.
        # Multi-element columns with dtypes such as '2f8' have a subdtype, so
        # we should look up the type of the column on that.
masked = mask = False
coltype = col.dtype.subdtype[0].type if col.dtype.subdtype else col.dtype.type
if col.null is not None:
mask = data[col.name] == col.null
# Return a MaskedColumn even if no elements are masked so
# we roundtrip better.
masked = True
elif mask_invalid and issubclass(coltype, np.inexact):
mask = np.isnan(data[col.name])
elif mask_invalid and issubclass(coltype, np.character):
mask = col.array == b""
if masked or np.any(mask):
column = MaskedColumn(
data=data[col.name], name=col.name, mask=mask, copy=False
)
else:
column = Column(data=data[col.name], name=col.name, copy=False)
# Copy over units
if col.unit is not None:
column.unit = u.Unit(
col.unit, format="fits", parse_strict=unit_parse_strict
)
# Copy over display format
if col.disp is not None:
column.format = _fortran_to_python_format(col.disp)
columns.append(column)
# Create Table object
t = Table(columns, copy=False)
# TODO: deal properly with unsigned integers
hdr = table.header
if astropy_native:
# Avoid circular imports, and also only import if necessary.
from .fitstime import fits_to_time
hdr = fits_to_time(hdr, t)
for key, value, comment in hdr.cards:
if key in ["COMMENT", "HISTORY"]:
# Convert to io.ascii format
if key == "COMMENT":
key = "comments"
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif is_column_keyword(key) or key in REMOVE_KEYWORDS:
pass
else:
t.meta[key] = value
# TODO: implement masking
# Decode any mixin columns that have been stored as standard Columns.
t = _decode_mixins(t)
return t
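# Usage sketch (a hypothetical helper, not part of this module): this function
# is normally reached through the unified I/O registry (registered at the
# bottom of this module) rather than called directly. "events.fits" is a
# hypothetical file with a binary table in HDU 1.
def _example_read_table(path="events.fits"):
    from astropy.table import Table
    # Equivalent to read_table_fits(path, hdu=1) via the registry.
    return Table.read(path, format="fits", hdu=1)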
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
# Determine if information will be lost without serializing meta. This is hardcoded
# to the set difference between column info attributes and what FITS can store
# natively (name, dtype, unit). See _get_col_attributes() in table/meta.py for where
# this comes from.
info_lost = any(
any(
getattr(col.info, attr, None) not in (None, {})
for attr in ("description", "meta")
)
for col in tbl.itercols()
)
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table. This ignores
# Time-subclass columns and leave them in the table so that the downstream
# FITS Time handling does the right thing.
with serialize_context_as("fits"):
encode_tbl = serialize.represent_mixins_as_columns(tbl, exclude_classes=(Time,))
# If the encoded table is unchanged then there were no mixins. But if there
# is column metadata (format, description, meta) that would be lost, then
# still go through the serialized columns machinery.
if encode_tbl is tbl and not info_lost:
return tbl
# Copy the meta dict if it was not copied by represent_mixins_as_columns.
# We will modify .meta['comments'] below and we do not want to see these
# comments in the input table.
if encode_tbl is tbl:
meta_copy = deepcopy(tbl.meta)
encode_tbl = Table(tbl.columns, meta=meta_copy, copy=False)
# Get the YAML serialization of information describing the table columns.
    # This is re-using ECSV code that combines the existing table.meta with
    # the extra __serialized_columns__ key. For FITS the table.meta is handled
# by the native FITS connect code, so don't include that in the YAML
# output.
ser_col = "__serialized_columns__"
# encode_tbl might not have a __serialized_columns__ key if there were no mixins,
# but machinery below expects it to be available, so just make an empty dict.
encode_tbl.meta.setdefault(ser_col, {})
tbl_meta_copy = encode_tbl.meta.copy()
try:
encode_tbl.meta = {ser_col: encode_tbl.meta[ser_col]}
meta_yaml_lines = meta.get_yaml_from_table(encode_tbl)
finally:
encode_tbl.meta = tbl_meta_copy
del encode_tbl.meta[ser_col]
if "comments" not in encode_tbl.meta:
encode_tbl.meta["comments"] = []
encode_tbl.meta["comments"].append("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
for line in meta_yaml_lines:
if len(line) == 0:
lines = [""]
else:
# Split line into 70 character chunks for COMMENT cards
idxs = list(range(0, len(line) + 70, 70))
lines = [line[i0:i1] + "\\" for i0, i1 in zip(idxs[:-1], idxs[1:])]
lines[-1] = lines[-1][:-1]
encode_tbl.meta["comments"].extend(lines)
encode_tbl.meta["comments"].append("--END-ASTROPY-SERIALIZED-COLUMNS--")
return encode_tbl
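# Round-trip sketch (a hypothetical helper, not part of this module): a mixin
# column (SkyCoord is used here purely for illustration) survives a FITS round
# trip because _encode_mixins stores the serialized column info in COMMENT
# cards and _decode_mixins restores it on read. "mixin.fits" is a hypothetical
# output path.
def _example_mixin_roundtrip(path="mixin.fits"):
    from astropy.coordinates import SkyCoord
    from astropy.table import Table
    t = Table()
    t["coord"] = SkyCoord([10.0, 20.0], [-45.0, 30.0], unit="deg")
    t.write(path, format="fits", overwrite=True)
    t2 = Table.read(path, format="fits")
    return type(t2["coord"])  # SkyCoord rather than a plain Column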
def write_table_fits(input, output, overwrite=False, append=False):
"""
    Write a Table object to a FITS file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
append : bool
Whether to append the table to an existing file
"""
# Encode any mixin columns into standard Columns.
input = _encode_mixins(input)
table_hdu = table_to_hdu(input, character_as_bytes=True)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
elif not append:
raise OSError(NOT_OVERWRITING_MSG.format(output))
if append:
# verify=False stops it reading and checking the existing file.
fits_append(output, table_hdu.data, table_hdu.header, verify=False)
else:
table_hdu.writeto(output)
io_registry.register_reader("fits", Table, read_table_fits)
io_registry.register_writer("fits", Table, write_table_fits)
io_registry.register_identifier("fits", Table, is_fits)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Facilities for diffing two FITS files. Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.
Used to implement the fitsdiff program.
"""
import fnmatch
import glob
import io
import operator
import os
import os.path
import textwrap
from collections import defaultdict
from inspect import signature
from itertools import islice
import numpy as np
from astropy import __version__
from astropy.utils.diff import (
diff_values,
fixed_width_indent,
report_diff_values,
where_not_allclose,
)
from astropy.utils.misc import NOT_OVERWRITING_MSG
from .card import BLANK_CARD, Card
# HDUList is used in one of the doctests
from .hdu.hdulist import HDUList, fitsopen # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from .header import Header
from .util import path_like
__all__ = [
"FITSDiff",
"HDUDiff",
"HeaderDiff",
"ImageDataDiff",
"RawDataDiff",
"TableDataDiff",
]
# Column attributes of interest for comparison
_COL_ATTRS = [
("unit", "units"),
("null", "null values"),
("bscale", "bscales"),
("bzero", "bzeros"),
("disp", "display formats"),
("dim", "dimensions"),
]
class _BaseDiff:
"""
Base class for all FITS diff objects.
When instantiating a FITS diff object, the first two arguments are always
the two objects to diff (two FITS files, two FITS headers, etc.).
Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
    The returned ``_BaseDiff`` instance has a number of attributes that describe
the results of the diff operation.
The most basic attribute, present on all ``_BaseDiff`` instances, is
``.identical`` which is `True` if the two objects being compared are
identical according to the diff method for objects of that type.
"""
def __init__(self, a, b):
"""
The ``_BaseDiff`` class does not implement a ``_diff`` method and
should not be instantiated directly. Instead instantiate the
appropriate subclass of ``_BaseDiff`` for the objects being compared
(for example, use `HeaderDiff` to compare two `Header` objects.
"""
self.a = a
self.b = b
# For internal use in report output
self._fileobj = None
self._indent = 0
self._diff()
def __bool__(self):
"""
A ``_BaseDiff`` object acts as `True` in a boolean context if the two
objects compared are different. Otherwise it acts as `False`.
"""
return not self.identical
@classmethod
def fromdiff(cls, other, a, b):
"""
Returns a new Diff object of a specific subclass from an existing diff
object, passing on the values for any arguments they share in common
(such as ignore_keywords).
For example::
>>> from astropy.io import fits
>>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
>>> headera, headerb = fits.Header(), fits.Header()
>>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
>>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
>>> list(hd.ignore_keywords)
['*']
"""
sig = signature(cls.__init__)
# The first 3 arguments of any Diff initializer are self, a, and b.
kwargs = {}
for arg in list(sig.parameters.keys())[3:]:
if hasattr(other, arg):
kwargs[arg] = getattr(other, arg)
return cls(a, b, **kwargs)
@property
def identical(self):
"""
`True` if all the ``.diff_*`` attributes on this diff instance are
empty, implying that no differences were found.
Any subclass of ``_BaseDiff`` must have at least one ``.diff_*``
attribute, which contains a non-empty value if and only if some
difference was found between the two objects being compared.
"""
return not any(
getattr(self, attr) for attr in self.__dict__ if attr.startswith("diff_")
)
def report(self, fileobj=None, indent=0, overwrite=False):
"""
Generates a text report on the differences (if any) between two
objects, and either returns it as a string or writes it to a file-like
object.
Parameters
----------
fileobj : file-like, string, or None, optional
If `None`, this method returns the report as a string. Otherwise it
returns `None` and writes the report to the given file-like object
(which must have a ``.write()`` method at a minimum), or to a new
file at the path specified.
indent : int
The number of 4 space tabs to indent the report.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Returns
-------
report : str or None
"""
return_string = False
filepath = None
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
if os.path.exists(fileobj) and not overwrite:
raise OSError(NOT_OVERWRITING_MSG.format(fileobj))
else:
filepath = fileobj
fileobj = open(filepath, "w")
elif fileobj is None:
fileobj = io.StringIO()
return_string = True
self._fileobj = fileobj
self._indent = indent # This is used internally by _writeln
try:
self._report()
finally:
if filepath:
fileobj.close()
if return_string:
return fileobj.getvalue()
def _writeln(self, text):
self._fileobj.write(fixed_width_indent(text, self._indent) + "\n")
def _diff(self):
raise NotImplementedError
def _report(self):
raise NotImplementedError
class FITSDiff(_BaseDiff):
"""Diff two FITS files by filename, or two `HDUList` objects.
`FITSDiff` objects have the following diff attributes:
- ``diff_hdu_count``: If the FITS files being compared have different
numbers of HDUs, this contains a 2-tuple of the number of HDUs in each
file.
- ``diff_hdus``: If any HDUs with the same index are different, this
contains a list of 2-tuples of the HDU index and the `HDUDiff` object
representing the differences between the two HDUs.
"""
def __init__(
self,
a,
b,
ignore_hdus=[],
ignore_keywords=[],
ignore_comments=[],
ignore_fields=[],
numdiffs=10,
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object.
b : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object to
compare to the first file.
ignore_hdus : sequence, optional
HDU names to ignore when comparing two FITS files or HDU lists; the
presence of these HDUs and their contents are ignored. Wildcard
strings may also be included in the list.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
if isinstance(a, (str, os.PathLike)):
try:
a = fitsopen(a)
except Exception as exc:
raise OSError(
"error opening file a ({}): {}: {}".format(
a, exc.__class__.__name__, exc.args[0]
)
)
close_a = True
else:
close_a = False
if isinstance(b, (str, os.PathLike)):
try:
b = fitsopen(b)
except Exception as exc:
raise OSError(
"error opening file b ({}): {}: {}".format(
b, exc.__class__.__name__, exc.args[0]
)
)
close_b = True
else:
close_b = False
# Normalize keywords/fields to ignore to upper case
self.ignore_hdus = {k.upper() for k in ignore_hdus}
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
# Some hdu names may be pattern wildcards. Find them.
self.ignore_hdu_patterns = set()
for name in list(self.ignore_hdus):
if name != "*" and glob.has_magic(name):
self.ignore_hdus.remove(name)
self.ignore_hdu_patterns.add(name)
self.diff_hdu_count = ()
self.diff_hdus = []
try:
super().__init__(a, b)
finally:
if close_a:
a.close()
if close_b:
b.close()
def _diff(self):
if len(self.a) != len(self.b):
self.diff_hdu_count = (len(self.a), len(self.b))
# Record filenames for use later in _report
self.filenamea = self.a.filename()
if not self.filenamea:
self.filenamea = f"<{self.a.__class__.__name__} object at {id(self.a):#x}>"
self.filenameb = self.b.filename()
if not self.filenameb:
self.filenameb = f"<{self.b.__class__.__name__} object at {id(self.b):#x}>"
if self.ignore_hdus:
self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus])
self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus])
if self.ignore_hdu_patterns:
a_names = [hdu.name for hdu in self.a]
b_names = [hdu.name for hdu in self.b]
for pattern in self.ignore_hdu_patterns:
self.a = HDUList(
[
h
for h in self.a
if h.name not in fnmatch.filter(a_names, pattern)
]
)
self.b = HDUList(
[
h
for h in self.b
if h.name not in fnmatch.filter(b_names, pattern)
]
)
# For now, just compare the extensions one by one in order.
# Might allow some more sophisticated types of diffing later.
# TODO: Somehow or another simplify the passing around of diff
# options--this will become important as the number of options grows
for idx in range(min(len(self.a), len(self.b))):
hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx])
if not hdu_diff.identical:
if (
self.a[idx].name == self.b[idx].name
and self.a[idx].ver == self.b[idx].ver
):
self.diff_hdus.append(
(idx, hdu_diff, self.a[idx].name, self.a[idx].ver)
)
else:
self.diff_hdus.append((idx, hdu_diff, "", self.a[idx].ver))
def _report(self):
wrapper = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ")
self._fileobj.write("\n")
self._writeln(f" fitsdiff: {__version__}")
self._writeln(f" a: {self.filenamea}\n b: {self.filenameb}")
if self.ignore_hdus:
ignore_hdus = " ".join(sorted(self.ignore_hdus))
self._writeln(f" HDU(s) not to be compared:\n{wrapper.fill(ignore_hdus)}")
if self.ignore_hdu_patterns:
ignore_hdu_patterns = " ".join(sorted(self.ignore_hdu_patterns))
self._writeln(
" HDU(s) not to be compared:\n{}".format(
wrapper.fill(ignore_hdu_patterns)
)
)
if self.ignore_keywords:
ignore_keywords = " ".join(sorted(self.ignore_keywords))
self._writeln(
" Keyword(s) not to be compared:\n{}".format(
wrapper.fill(ignore_keywords)
)
)
if self.ignore_comments:
ignore_comments = " ".join(sorted(self.ignore_comments))
self._writeln(
" Keyword(s) whose comments are not to be compared:\n{}".format(
wrapper.fill(ignore_comments)
)
)
if self.ignore_fields:
ignore_fields = " ".join(sorted(self.ignore_fields))
self._writeln(
" Table column(s) not to be compared:\n{}".format(
wrapper.fill(ignore_fields)
)
)
self._writeln(
" Maximum number of different data values to be reported: {}".format(
self.numdiffs
)
)
self._writeln(
" Relative tolerance: {}, Absolute tolerance: {}".format(
self.rtol, self.atol
)
)
if self.diff_hdu_count:
self._fileobj.write("\n")
self._writeln("Files contain different numbers of HDUs:")
self._writeln(f" a: {self.diff_hdu_count[0]}")
self._writeln(f" b: {self.diff_hdu_count[1]}")
if not self.diff_hdus:
self._writeln("No differences found between common HDUs.")
return
elif not self.diff_hdus:
self._fileobj.write("\n")
self._writeln("No differences found.")
return
for idx, hdu_diff, extname, extver in self.diff_hdus:
# print out the extension heading
if idx == 0:
self._fileobj.write("\n")
self._writeln("Primary HDU:")
else:
self._fileobj.write("\n")
if extname:
self._writeln(f"Extension HDU {idx} ({extname}, {extver}):")
else:
self._writeln(f"Extension HDU {idx}:")
hdu_diff.report(self._fileobj, indent=self._indent + 1)
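# Usage sketch (a hypothetical helper, not part of this module): diff two
# files on disk and render the report as a string. The filenames are
# placeholders.
def _example_fitsdiff(path_a="a.fits", path_b="b.fits"):
    from astropy.io import fits
    diff = fits.FITSDiff(path_a, path_b, ignore_keywords=["DATE"], rtol=1e-6)
    if diff.identical:
        return "identical"
    return diff.report()  # report() returns a string when no fileobj is given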
class HDUDiff(_BaseDiff):
"""
    Diff two HDU objects, including their headers and their data (but only if
    both HDUs contain the same type of data: image, table, or unknown).
`HDUDiff` objects have the following diff attributes:
- ``diff_extnames``: If the two HDUs have different EXTNAME values, this
contains a 2-tuple of the different extension names.
    - ``diff_extvers``: If the two HDUs have different EXTVER values, this
contains a 2-tuple of the different extension versions.
- ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this
contains a 2-tuple of the different extension levels.
- ``diff_extension_types``: If the two HDUs have different XTENSION values,
this contains a 2-tuple of the different extension types.
- ``diff_headers``: Contains a `HeaderDiff` object for the headers of the
two HDUs. This will always contain an object--it may be determined
whether the headers are different through ``diff_headers.identical``.
- ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or
`RawDataDiff` as appropriate for the data in the HDUs, and only if the
two HDUs have non-empty data of the same type (`RawDataDiff` is used for
HDUs containing non-empty data of an indeterminate type).
"""
def __init__(
self,
a,
b,
ignore_keywords=[],
ignore_comments=[],
ignore_fields=[],
numdiffs=10,
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.rtol = rtol
self.atol = atol
self.numdiffs = numdiffs
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.diff_extnames = ()
self.diff_extvers = ()
self.diff_extlevels = ()
self.diff_extension_types = ()
self.diff_headers = None
self.diff_data = None
super().__init__(a, b)
def _diff(self):
if self.a.name != self.b.name:
self.diff_extnames = (self.a.name, self.b.name)
if self.a.ver != self.b.ver:
self.diff_extvers = (self.a.ver, self.b.ver)
if self.a.level != self.b.level:
self.diff_extlevels = (self.a.level, self.b.level)
if self.a.header.get("XTENSION") != self.b.header.get("XTENSION"):
self.diff_extension_types = (
self.a.header.get("XTENSION"),
self.b.header.get("XTENSION"),
)
self.diff_headers = HeaderDiff.fromdiff(
self, self.a.header.copy(), self.b.header.copy()
)
if self.a.data is None or self.b.data is None:
# TODO: Perhaps have some means of marking this case
pass
elif self.a.is_image and self.b.is_image:
self.diff_data = ImageDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif isinstance(self.a, _TableLikeHDU) and isinstance(self.b, _TableLikeHDU):
# TODO: Replace this if/when _BaseHDU grows a .is_table property
self.diff_data = TableDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif not self.diff_extension_types:
# Don't diff the data for unequal extension types that are not
# recognized image or table types
self.diff_data = RawDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
def _report(self):
if self.identical:
self._writeln(" No differences found.")
if self.diff_extension_types:
self._writeln(
" Extension types differ:\n a: {}\n b: {}".format(
*self.diff_extension_types
)
)
if self.diff_extnames:
self._writeln(
" Extension names differ:\n a: {}\n b: {}".format(*self.diff_extnames)
)
if self.diff_extvers:
self._writeln(
" Extension versions differ:\n a: {}\n b: {}".format(
*self.diff_extvers
)
)
if self.diff_extlevels:
self._writeln(
" Extension levels differ:\n a: {}\n b: {}".format(
*self.diff_extlevels
)
)
if not self.diff_headers.identical:
self._fileobj.write("\n")
self._writeln(" Headers contain differences:")
self.diff_headers.report(self._fileobj, indent=self._indent + 1)
if self.diff_data is not None and not self.diff_data.identical:
self._fileobj.write("\n")
self._writeln(" Data contains differences:")
self.diff_data.report(self._fileobj, indent=self._indent + 1)
class HeaderDiff(_BaseDiff):
"""
Diff two `Header` objects.
`HeaderDiff` objects have the following diff attributes:
- ``diff_keyword_count``: If the two headers contain a different number of
keywords, this contains a 2-tuple of the keyword count for each header.
- ``diff_keywords``: If either header contains one or more keywords that
don't appear at all in the other header, this contains a 2-tuple
consisting of a list of the keywords only appearing in header a, and a
list of the keywords only appearing in header b.
- ``diff_duplicate_keywords``: If a keyword appears in both headers at
least once, but contains a different number of duplicates (for example, a
different number of HISTORY cards in each header), an item is added to
this dict with the keyword as the key, and a 2-tuple of the different
counts of that keyword as the value. For example::
{'HISTORY': (20, 19)}
means that header a contains 20 HISTORY cards, while header b contains
only 19 HISTORY cards.
    - ``diff_keyword_values``: If any of the keywords common to the two
headers have different values, they appear in this dict. It has a
structure similar to ``diff_duplicate_keywords``, with the keyword as the
key, and a 2-tuple of the different values as the value. For example::
{'NAXIS': (2, 3)}
means that the NAXIS keyword has a value of 2 in header a, and a value of
3 in header b. This excludes any keywords matched by the
``ignore_keywords`` list.
- ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains
differences between keyword comments.
`HeaderDiff` objects also have a ``common_keywords`` attribute that lists
all keywords that appear in both headers.
"""
def __init__(
self,
a,
b,
ignore_keywords=[],
ignore_comments=[],
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : `~astropy.io.fits.Header` or string or bytes
A header.
b : `~astropy.io.fits.Header` or string or bytes
A header to compare to the first header.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.ignore_keyword_patterns = set()
self.ignore_comment_patterns = set()
for keyword in list(self.ignore_keywords):
keyword = keyword.upper()
if keyword != "*" and glob.has_magic(keyword):
self.ignore_keywords.remove(keyword)
self.ignore_keyword_patterns.add(keyword)
for keyword in list(self.ignore_comments):
keyword = keyword.upper()
if keyword != "*" and glob.has_magic(keyword):
self.ignore_comments.remove(keyword)
self.ignore_comment_patterns.add(keyword)
# Keywords appearing in each header
self.common_keywords = []
# Set to the number of keywords in each header if the counts differ
self.diff_keyword_count = ()
# Set if the keywords common to each header (excluding ignore_keywords)
# appear in different positions within the header
# TODO: Implement this
self.diff_keyword_positions = ()
# Keywords unique to each header (excluding keywords in
# ignore_keywords)
self.diff_keywords = ()
# Keywords that have different numbers of duplicates in each header
# (excluding keywords in ignore_keywords)
self.diff_duplicate_keywords = {}
# Keywords common to each header but having different values (excluding
# keywords in ignore_keywords)
self.diff_keyword_values = defaultdict(list)
# Keywords common to each header but having different comments
# (excluding keywords in ignore_keywords or in ignore_comments)
self.diff_keyword_comments = defaultdict(list)
if isinstance(a, str):
a = Header.fromstring(a)
if isinstance(b, str):
b = Header.fromstring(b)
if not (isinstance(a, Header) and isinstance(b, Header)):
raise TypeError(
"HeaderDiff can only diff astropy.io.fits.Header "
"objects or strings containing FITS headers."
)
super().__init__(a, b)
# TODO: This doesn't pay much attention to the *order* of the keywords,
# except in the case of duplicate keywords. The order should be checked
# too, or at least it should be an option.
def _diff(self):
if self.ignore_blank_cards:
cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
else:
cardsa = list(self.a.cards)
cardsb = list(self.b.cards)
# build dictionaries of keyword values and comments
def get_header_values_comments(cards):
values = {}
comments = {}
for card in cards:
value = card.value
if self.ignore_blanks and isinstance(value, str):
value = value.rstrip()
values.setdefault(card.keyword, []).append(value)
comments.setdefault(card.keyword, []).append(card.comment)
return values, comments
valuesa, commentsa = get_header_values_comments(cardsa)
valuesb, commentsb = get_header_values_comments(cardsb)
        # Normalize all keywords to upper-case for comparison's sake;
# TODO: HIERARCH keywords should be handled case-sensitively I think
keywordsa = {k.upper() for k in valuesa}
keywordsb = {k.upper() for k in valuesb}
self.common_keywords = sorted(keywordsa.intersection(keywordsb))
if len(cardsa) != len(cardsb):
self.diff_keyword_count = (len(cardsa), len(cardsb))
# Any other diff attributes should exclude ignored keywords
keywordsa = keywordsa.difference(self.ignore_keywords)
keywordsb = keywordsb.difference(self.ignore_keywords)
if self.ignore_keyword_patterns:
for pattern in self.ignore_keyword_patterns:
keywordsa = keywordsa.difference(fnmatch.filter(keywordsa, pattern))
keywordsb = keywordsb.difference(fnmatch.filter(keywordsb, pattern))
if "*" in self.ignore_keywords:
# Any other differences between keywords are to be ignored
return
left_only_keywords = sorted(keywordsa.difference(keywordsb))
right_only_keywords = sorted(keywordsb.difference(keywordsa))
if left_only_keywords or right_only_keywords:
self.diff_keywords = (left_only_keywords, right_only_keywords)
# Compare count of each common keyword
for keyword in self.common_keywords:
if keyword in self.ignore_keywords:
continue
if self.ignore_keyword_patterns:
skip = False
for pattern in self.ignore_keyword_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
counta = len(valuesa[keyword])
countb = len(valuesb[keyword])
if counta != countb:
self.diff_duplicate_keywords[keyword] = (counta, countb)
# Compare keywords' values and comments
for a, b in zip(valuesa[keyword], valuesb[keyword]):
if diff_values(a, b, rtol=self.rtol, atol=self.atol):
self.diff_keyword_values[keyword].append((a, b))
else:
# If there are duplicate keywords we need to be able to
# index each duplicate; if the values of a duplicate
# are identical use None here
self.diff_keyword_values[keyword].append(None)
if not any(self.diff_keyword_values[keyword]):
# No differences found; delete the array of Nones
del self.diff_keyword_values[keyword]
if "*" in self.ignore_comments or keyword in self.ignore_comments:
continue
if self.ignore_comment_patterns:
skip = False
for pattern in self.ignore_comment_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
for a, b in zip(commentsa[keyword], commentsb[keyword]):
if diff_values(a, b):
self.diff_keyword_comments[keyword].append((a, b))
else:
self.diff_keyword_comments[keyword].append(None)
if not any(self.diff_keyword_comments[keyword]):
del self.diff_keyword_comments[keyword]
def _report(self):
if self.diff_keyword_count:
self._writeln(" Headers have different number of cards:")
self._writeln(f" a: {self.diff_keyword_count[0]}")
self._writeln(f" b: {self.diff_keyword_count[1]}")
if self.diff_keywords:
for keyword in self.diff_keywords[0]:
if keyword in Card._commentary_keywords:
val = self.a[keyword][0]
else:
val = self.a[keyword]
self._writeln(f" Extra keyword {keyword!r:8} in a: {val!r}")
for keyword in self.diff_keywords[1]:
if keyword in Card._commentary_keywords:
val = self.b[keyword][0]
else:
val = self.b[keyword]
self._writeln(f" Extra keyword {keyword!r:8} in b: {val!r}")
if self.diff_duplicate_keywords:
for keyword, count in sorted(self.diff_duplicate_keywords.items()):
self._writeln(f" Inconsistent duplicates of keyword {keyword!r:8}:")
self._writeln(
" Occurs {} time(s) in a, {} times in (b)".format(*count)
)
if self.diff_keyword_values or self.diff_keyword_comments:
for keyword in self.common_keywords:
report_diff_keyword_attr(
self._fileobj,
"values",
self.diff_keyword_values,
keyword,
ind=self._indent,
)
report_diff_keyword_attr(
self._fileobj,
"comments",
self.diff_keyword_comments,
keyword,
ind=self._indent,
)
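# Usage sketch (a hypothetical helper, not part of this module): HeaderDiff
# can be used directly on two Header objects, independent of any HDU.
def _example_headerdiff():
    from astropy.io import fits
    from astropy.io.fits.diff import HeaderDiff
    ha = fits.Header([("OBSERVER", "Alice"), ("EXPTIME", 30.0)])
    hb = fits.Header([("OBSERVER", "Bob"), ("EXPTIME", 30.0)])
    diff = HeaderDiff(ha, hb)
    # Expected: {'OBSERVER': [('Alice', 'Bob')]}
    return dict(diff.diff_keyword_values)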
# TODO: It might be good if there was also a threshold option for percentage of
# different pixels: For example ignore if only 1% of the pixels are different
# within some threshold. There are lots of possibilities here, but hold off
# for now until specific cases come up.
class ImageDataDiff(_BaseDiff):
"""
Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE
extension HDU, though the data unit is assumed to be "pixels").
`ImageDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: If the two arrays contain either a different number
of dimensions or different sizes in any dimension, this contains a
2-tuple of the shapes of each array. Currently no further comparison is
performed on images that don't have the exact same dimensions.
- ``diff_pixels``: If the two images contain any different pixels, this
contains a list of 2-tuples of the array index where the difference was
found, and another 2-tuple containing the different values. For example,
if the pixel at (0, 0) contains different values this would look like::
        [((0, 0), (1.1, 2.2))]
where 1.1 and 2.2 are the values of that pixel in each array. This
array only contains up to ``self.numdiffs`` differences, for storage
efficiency.
- ``diff_total``: The total number of different pixels found between the
arrays. Although ``diff_pixels`` does not necessarily contain all the
different pixel values, this can be used to get a count of the total
number of differences found.
- ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
of pixels in the arrays.
"""
def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
        a : ndarray
            An image data array to compare.
        b : ndarray
            An image data array to compare to the first array.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.diff_dimensions = ()
self.diff_pixels = []
self.diff_ratio = 0
        # self.diff_pixels only holds up to numdiffs differing pixels;
        # self.diff_total stores the total count of differences between
        # the images, but not the differing values themselves.
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
if self.a.shape != self.b.shape:
self.diff_dimensions = (self.a.shape, self.b.shape)
# Don't do any further comparison if the dimensions differ
# TODO: Perhaps we could, however, diff just the intersection
# between the two images
return
# Find the indices where the values are not equal
# If neither a nor b are floating point (or complex), ignore rtol and
# atol
if not (
np.issubdtype(self.a.dtype, np.inexact)
or np.issubdtype(self.b.dtype, np.inexact)
):
rtol = 0
atol = 0
else:
rtol = self.rtol
atol = self.atol
diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol)
self.diff_total = len(diffs[0])
if self.diff_total == 0:
# Then we're done
return
if self.numdiffs < 0:
numdiffs = self.diff_total
else:
numdiffs = self.numdiffs
self.diff_pixels = [
(idx, (self.a[idx], self.b[idx]))
for idx in islice(zip(*diffs), 0, numdiffs)
]
self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))
def _report(self):
if self.diff_dimensions:
dimsa = " x ".join(str(d) for d in reversed(self.diff_dimensions[0]))
dimsb = " x ".join(str(d) for d in reversed(self.diff_dimensions[1]))
self._writeln(" Data dimensions differ:")
self._writeln(f" a: {dimsa}")
self._writeln(f" b: {dimsb}")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_pixels:
return
for index, values in self.diff_pixels:
index = [x + 1 for x in reversed(index)]
self._writeln(f" Data differs at {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
" {} different pixels found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
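# Usage sketch (a hypothetical helper, not part of this module): ImageDataDiff
# compares two bare arrays; rtol/atol only apply to floating-point or complex
# data, per the dtype check in _diff above.
def _example_imagedatadiff():
    import numpy as np
    from astropy.io.fits.diff import ImageDataDiff
    a = np.zeros((10, 10))
    b = a.copy()
    b[0, 0] = 1e-9
    # True: the single differing pixel is within the absolute tolerance.
    return ImageDataDiff(a, b, atol=1e-6).identical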
class RawDataDiff(ImageDataDiff):
"""
`RawDataDiff` is just a special case of `ImageDataDiff` where the images
are one-dimensional, and the data is treated as a 1-dimensional array of
bytes instead of pixel values. This is used to compare the data of two
non-standard extension HDUs that were not recognized as containing image or
table data.
    `RawDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
`ImageDataDiff` objects. Though the "dimension" of each array is just an
integer representing the number of bytes in the data.
- ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
objects, but renamed to reflect the minor semantic difference that these
are raw bytes and not pixel values. Also the indices are integers
instead of tuples.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
"""
def __init__(self, a, b, numdiffs=10):
"""
Parameters
----------
        a : ndarray
            A raw byte data array to compare.
        b : ndarray
            A raw byte data array to compare to the first one.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
"""
self.diff_dimensions = ()
self.diff_bytes = []
super().__init__(a, b, numdiffs=numdiffs)
def _diff(self):
super()._diff()
if self.diff_dimensions:
self.diff_dimensions = (
self.diff_dimensions[0][0],
self.diff_dimensions[1][0],
)
self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
del self.diff_pixels
def _report(self):
if self.diff_dimensions:
self._writeln(" Data sizes differ:")
self._writeln(f" a: {self.diff_dimensions[0]} bytes")
self._writeln(f" b: {self.diff_dimensions[1]} bytes")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_bytes:
return
for index, values in self.diff_bytes:
self._writeln(f" Data differs at byte {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
self._writeln(" ...")
self._writeln(
" {} different bytes found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
class TableDataDiff(_BaseDiff):
"""
Diff two table data arrays. It doesn't matter whether the data originally
came from a binary or ASCII table--the data should be passed in as a
recarray.
`TableDataDiff` objects have the following diff attributes:
- ``diff_column_count``: If the tables being compared have different
numbers of columns, this contains a 2-tuple of the column count in each
table. Even if the tables have different column counts, an attempt is
still made to compare any columns they have in common.
- ``diff_columns``: If either table contains columns unique to that table,
either in name or format, this contains a 2-tuple of lists. The first
element is a list of columns (these are full `Column` objects) that
      appear only in table a. The second element is a list of columns that
appear only in table b. This only lists columns with different column
definitions, and has nothing to do with the data in those columns.
- ``diff_column_names``: This is like ``diff_columns``, but lists only the
names of columns unique to either table, rather than the full `Column`
objects.
- ``diff_column_attributes``: Lists columns that are in both tables but
have different secondary attributes, such as TUNIT or TDISP. The format
is a list of 2-tuples: The first a tuple of the column name and the
attribute, the second a tuple of the different values.
- ``diff_values``: `TableDataDiff` compares the data in each table on a
column-by-column basis. If any different data is found, it is added to
this list. The format of this list is similar to the ``diff_pixels``
attribute on `ImageDataDiff` objects, though the "index" consists of a
(column_name, row) tuple. For example::
        [(('TARGET', 0), ('NGC1001', 'NGC1002'))]
shows that the tables contain different values in the 0-th row of the
'TARGET' column.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
`TableDataDiff` objects also have a ``common_columns`` attribute that lists
the `Column` objects for columns that are identical in both tables, and a
``common_column_names`` attribute which contains a set of the names of
those columns.
"""
def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
        a : recarray
            A table data array to compare.
        b : recarray
            A table data array to compare to the first one.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.ignore_fields = set(ignore_fields)
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.common_columns = []
self.common_column_names = set()
# self.diff_columns contains columns with different column definitions,
# but not different column data. Column data is only compared in
# columns that have the same definitions
self.diff_rows = ()
self.diff_column_count = ()
self.diff_columns = ()
# If two columns have the same name+format, but other attributes are
# different (such as TUNIT or such) they are listed here
self.diff_column_attributes = []
# Like self.diff_columns, but just contains a list of the column names
# unique to each table, and in the order they appear in the tables
self.diff_column_names = ()
self.diff_values = []
self.diff_ratio = 0
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
# Much of the code for comparing columns is similar to the code for
# comparing headers--consider refactoring
colsa = self.a.columns
colsb = self.b.columns
if len(colsa) != len(colsb):
self.diff_column_count = (len(colsa), len(colsb))
# Even if the number of columns are unequal, we still do comparison of
# any common columns
colsa = {c.name.lower(): c for c in colsa}
colsb = {c.name.lower(): c for c in colsb}
if "*" in self.ignore_fields:
# If all columns are to be ignored, ignore any further differences
# between the columns
return
# Keep the user's original ignore_fields list for reporting purposes,
# but internally use a case-insensitive version
ignore_fields = {f.lower() for f in self.ignore_fields}
# It might be nice if there were a cleaner way to do this, but for now
# it'll do
for fieldname in ignore_fields:
fieldname = fieldname.lower()
if fieldname in colsa:
del colsa[fieldname]
if fieldname in colsb:
del colsb[fieldname]
colsa_set = set(colsa.values())
colsb_set = set(colsb.values())
self.common_columns = sorted(
colsa_set.intersection(colsb_set), key=operator.attrgetter("name")
)
self.common_column_names = {col.name.lower() for col in self.common_columns}
left_only_columns = {
col.name.lower(): col for col in colsa_set.difference(colsb_set)
}
right_only_columns = {
col.name.lower(): col for col in colsb_set.difference(colsa_set)
}
if left_only_columns or right_only_columns:
self.diff_columns = (left_only_columns, right_only_columns)
self.diff_column_names = ([], [])
if left_only_columns:
for col in self.a.columns:
if col.name.lower() in left_only_columns:
self.diff_column_names[0].append(col.name)
if right_only_columns:
for col in self.b.columns:
if col.name.lower() in right_only_columns:
self.diff_column_names[1].append(col.name)
# If the tables have a different number of rows, we don't compare the
# columns right now.
# TODO: It might be nice to optionally compare the first n rows where n
# is the minimum of the row counts between the two tables.
if len(self.a) != len(self.b):
self.diff_rows = (len(self.a), len(self.b))
return
# If the tables contain no rows there's no data to compare, so we're
# done at this point. (See ticket #178)
if len(self.a) == len(self.b) == 0:
return
# Like in the old fitsdiff, compare tables on a column by column basis
# The difficulty here is that, while FITS column names are meant to be
# case-insensitive, Astropy still allows, for the sake of flexibility,
# two columns with the same name but different case. When columns are
        # accessed in FITS tables, a case-sensitive match is tried first, and failing
# that a case-insensitive match is made.
# It's conceivable that the same column could appear in both tables
# being compared, but with different case.
# Though it *may* lead to inconsistencies in these rare cases, this
# just assumes that there are no duplicated column names in either
# table, and that the column names can be treated case-insensitively.
for col in self.common_columns:
name_lower = col.name.lower()
if name_lower in ignore_fields:
continue
cola = colsa[name_lower]
colb = colsb[name_lower]
for attr, _ in _COL_ATTRS:
vala = getattr(cola, attr, None)
valb = getattr(colb, attr, None)
if diff_values(vala, valb):
self.diff_column_attributes.append(
((col.name.upper(), attr), (vala, valb))
)
arra = self.a[col.name]
arrb = self.b[col.name]
if np.issubdtype(arra.dtype, np.floating) and np.issubdtype(
arrb.dtype, np.floating
):
diffs = where_not_allclose(arra, arrb, rtol=self.rtol, atol=self.atol)
            elif "P" in col.format or "Q" in col.format:
                # Both P and Q TFORMs denote variable-length array columns
diffs = (
[
idx
for idx in range(len(arra))
if not np.allclose(
arra[idx], arrb[idx], rtol=self.rtol, atol=self.atol
)
],
)
else:
diffs = np.where(arra != arrb)
self.diff_total += len(set(diffs[0]))
if self.numdiffs >= 0:
if len(self.diff_values) >= self.numdiffs:
# Don't save any more diff values
continue
# Add no more diff'd values than this
max_diffs = self.numdiffs - len(self.diff_values)
else:
max_diffs = len(diffs[0])
last_seen_idx = None
for idx in islice(diffs[0], 0, max_diffs):
if idx == last_seen_idx:
                    # Skip duplicate indices, which may occur when the column
# data contains multi-dimensional values; we're only
# interested in storing row-by-row differences
continue
last_seen_idx = idx
self.diff_values.append(((col.name, idx), (arra[idx], arrb[idx])))
total_values = len(self.a) * len(self.a.dtype.fields)
self.diff_ratio = float(self.diff_total) / float(total_values)
def _report(self):
if self.diff_column_count:
self._writeln(" Tables have different number of columns:")
self._writeln(f" a: {self.diff_column_count[0]}")
self._writeln(f" b: {self.diff_column_count[1]}")
if self.diff_column_names:
# Show columns with names unique to either table
for name in self.diff_column_names[0]:
format = self.diff_columns[0][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in a")
for name in self.diff_column_names[1]:
format = self.diff_columns[1][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in b")
col_attrs = dict(_COL_ATTRS)
# Now go through each table again and show columns with common
# names but other property differences...
for col_attr, vals in self.diff_column_attributes:
name, attr = col_attr
self._writeln(f" Column {name} has different {col_attrs[attr]}:")
report_diff_values(
vals[0],
vals[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_rows:
self._writeln(" Table rows differ:")
self._writeln(f" a: {self.diff_rows[0]}")
self._writeln(f" b: {self.diff_rows[1]}")
self._writeln(" No further data comparison performed.")
return
if not self.diff_values:
return
# Finally, let's go through and report column data differences:
for indx, values in self.diff_values:
self._writeln(" Column {} data differs in row {}:".format(*indx))
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_values and self.numdiffs < self.diff_total:
self._writeln(
" ...{} additional difference(s) found.".format(
self.diff_total - self.numdiffs
)
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
" {} different table data element(s) found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
"""
Write a diff between two header keyword values or comments to the specified
file-like object.
"""
if keyword in diffs:
vals = diffs[keyword]
for idx, val in enumerate(vals):
if val is None:
continue
if idx == 0:
dup = ""
else:
dup = f"[{idx + 1}]"
fileobj.write(
fixed_width_indent(
f" Keyword {keyword:8}{dup} has different {attr}:\n",
ind,
)
)
report_diff_values(val[0], val[1], fileobj=fileobj, indent_width=ind + 1)
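# A minimal, illustrative sketch (not part of the diff API): the float-column
# branch of TableDataDiff._diff relies on where_not_allclose(), which is
# expected to return the indices at which two arrays differ beyond the given
# tolerances, much like an inverted numpy.isclose():
if __name__ == "__main__":
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([1.0, 2.0 + 1e-9, 4.0])
    # Rows 0 and 1 agree within rtol; only row 2 counts as a difference
    print(np.where(~np.isclose(a, b, rtol=1e-7, atol=0.0))[0])  # [2]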
|
7df812a5f9bd2da831dd3e59859006a3c6a0f4deb38f726ba680dd9de9f92705 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
A package for reading and writing FITS files and manipulating their
contents.
A module for reading and writing Flexible Image Transport System
(FITS) files. This file format was endorsed by the International
Astronomical Union in 1999 and mandated by NASA as the standard format
for storing high energy astrophysics data. For details of the FITS
standard, see the NASA/Science Office of Standards and Technology
publication, NOST 100-2.0.
"""
from astropy import config as _config
# Set module-global boolean variables
# TODO: Make it possible to set these variables via environment variables
# again, once support for that is added to Astropy
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.fits`.
"""
enable_record_valued_keyword_cards = _config.ConfigItem(
True,
"If True, enable support for record-valued keywords as described by "
"FITS WCS distortion paper. Otherwise they are treated as normal "
"keywords.",
aliases=["astropy.io.fits.enabled_record_valued_keyword_cards"],
)
extension_name_case_sensitive = _config.ConfigItem(
False,
"If True, extension names (i.e. the ``EXTNAME`` keyword) should be "
"treated as case-sensitive.",
)
strip_header_whitespace = _config.ConfigItem(
True,
"If True, automatically remove trailing whitespace for string values in"
" headers. Otherwise the values are returned verbatim, with all "
"whitespace intact.",
)
use_memmap = _config.ConfigItem(
True,
"If True, use memory-mapped file access to read/write the data in "
"FITS files. This generally provides better performance, especially "
"for large files, but may affect performance in I/O-heavy "
"applications.",
)
lazy_load_hdus = _config.ConfigItem(
True,
"If True, use lazy loading of HDUs when opening FITS files by "
"default; that is fits.open() will only seek for and read HDUs on "
"demand rather than reading all HDUs at once. See the documentation "
"for fits.open() for more details.",
)
    enable_uint = _config.ConfigItem(
        True,
        "If True, default to recognizing the convention for representing "
        "unsigned integers in FITS--if an array has BITPIX > 0, BSCALE = 1, "
        "and BZERO = 2**(BITPIX-1), represent the data as unsigned integers "
        "per this convention.",
    )
conf = Conf()
# Public API compatibility imports
# These need to come after the global config variables, as some of the
# submodules use them
from . import card, column, convenience, hdu
from .card import *
from .column import *
from .convenience import *
from .diff import *
from .fitsrec import FITS_rec, FITS_record
from .hdu import *
from .hdu.groups import GroupData
from .hdu.hdulist import fitsopen as open
from .hdu.image import Section
from .header import Header
from .verify import VerifyError
__all__ = (
["Conf", "conf"]
+ card.__all__
+ column.__all__
+ convenience.__all__
+ hdu.__all__
+ [
"FITS_record",
"FITS_rec",
"GroupData",
"open",
"Section",
"Header",
"VerifyError",
"conf",
]
)
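# Illustrative usage sketch (hedged; "example.fits" is a hypothetical file):
# the configuration items defined above can be overridden temporarily via the
# standard astropy config machinery, e.g.:
#
#     from astropy.io import fits
#     with fits.conf.set_temp("use_memmap", False):
#         with fits.open("example.fits") as hdul:
#             hdul.info()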
|
22bcc445ebd2e9d04a67c7c7feff42a26aec23176ddb6430a13987171f49be25 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import numbers
import operator
import re
import sys
import warnings
import weakref
from collections import OrderedDict
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import indent, isiterable, lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from .card import CARD_LENGTH, Card
from .util import NotifierMixin, _convert_array, _is_int, cmp, encode_ascii, pairwise
from .verify import VerifyError, VerifyWarning
__all__ = ["Column", "ColDefs", "Delayed"]
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {
"L": "i1",
"B": "u1",
"I": "i2",
"J": "i4",
"K": "i8",
"E": "f4",
"D": "f8",
"C": "c8",
"M": "c16",
"A": "a",
}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS["b1"] = "L"
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS["u2"] = "I"
NUMPY2FITS["u4"] = "J"
NUMPY2FITS["u8"] = "K"
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS["f2"] = "E"
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ["L", "B", "I", "J", "K", "D", "M", "A"]
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {"E": "D", "C": "M"}
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {"A": "a", "I": "i4", "J": "i8", "F": "f8", "E": "f8", "D": "f8"}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {"A": "", "I": "d", "J": "d", "F": "f", "E": "E", "D": "E"}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {
"A": (1, 0),
"I": (10, 0),
"J": (15, 0),
"E": (15, 7),
"F": (16, 7),
"D": (25, 17),
}
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT["F"] = re.compile(
    r"(?:(?P<formatc>[F])(?P<width>[0-9]+)\.(?P<precision>[0-9]+))|"
)
TDISP_RE_DICT["A"] = TDISP_RE_DICT["L"] = re.compile(
r"(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|"
)
TDISP_RE_DICT["I"] = TDISP_RE_DICT["B"] = TDISP_RE_DICT["O"] = TDISP_RE_DICT[
"Z"
] = re.compile(
r"(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)"
r"(?:\.{0,1}(?P<precision>[0-9]+))?))|"
)
TDISP_RE_DICT["E"] = TDISP_RE_DICT["G"] = TDISP_RE_DICT["D"] = re.compile(
r"(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\."
r"(?P<precision>[0-9]+))+)"
r"(?:E{0,1}(?P<exponential>[0-9]+)?)|"
)
TDISP_RE_DICT["EN"] = TDISP_RE_DICT["ES"] = re.compile(
    r"(?:(?P<formatc>E[NS])(?P<width>[0-9]+)\.(?P<precision>[0-9]+))"
)
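# A minimal sketch of how the patterns above decompose a TDISPn value
# ("F8.3" is a hypothetical display format, used purely for illustration):
#
#     m = TDISP_RE_DICT["F"].match("F8.3")
#     m.group("formatc"), m.group("width"), m.group("precision")
#     # -> ('F', '8', '3')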
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
# Can't predefine zero padding and space padding beforehand without
# knowing the value being formatted, so grab the precision and use that
# to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering Fortran format, exponent a multiple of three)
# ES: Float (scientific, same as EN but with a non-zero leading digit)
# E: Float, exponential notation
# Can't get the exponent restriction to work without knowing the value
# beforehand, so just use width and precision; same with D, G, EN, and
# ES formats
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {
"I": "{{:{width}d}}",
"B": "{{:{width}b}}",
"O": "{{:{width}o}}",
"Z": "{{:{width}x}}",
"F": "{{:{width}.{precision}f}}",
"G": "{{:{width}.{precision}g}}",
}
TDISP_FMT_DICT["A"] = TDISP_FMT_DICT["L"] = "{{:>{width}}}"
TDISP_FMT_DICT["E"] = TDISP_FMT_DICT["D"] = TDISP_FMT_DICT["EN"] = TDISP_FMT_DICT[
"ES"
] = "{{:{width}.{precision}e}}"
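# Worked example (illustrative): a parsed TDISP of "F8.3" maps through
# TDISP_FMT_DICT to a Python format string, which is filled in with the
# parsed width/precision and then applied to a value:
#
#     fmt = TDISP_FMT_DICT["F"].format(width=8, precision=3)  # -> '{:8.3f}'
#     fmt.format(3.14159)                                     # -> '   3.142'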
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = (
"TTYPE",
"TFORM",
"TUNIT",
"TNULL",
"TSCAL",
"TZERO",
"TDISP",
"TBCOL",
"TDIM",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
)
KEYWORD_ATTRIBUTES = (
"name",
"format",
"unit",
"null",
"bscale",
"bzero",
"disp",
"start",
"dim",
"coord_type",
"coord_unit",
"coord_ref_point",
"coord_ref_value",
"coord_inc",
"time_ref_pos",
)
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(
r"(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])" r"(?P<option>[!-~]*)", re.I
)
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(
r"(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|"
r"(?:(?P<formatf>[FED])"
r"(?:(?P<widthf>[0-9]+)(?:\."
r"(?P<precision>[0-9]+))?)?)"
)
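# A minimal sketch of how these patterns split TFORMn values (the example
# values are hypothetical):
#
#     TFORMAT_RE.match("10A").group("repeat", "format", "option")
#     # -> ('10', 'A', '')
#     TFORMAT_ASCII_RE.match("F8.3").group("formatf", "widthf", "precision")
#     # -> ('F', '8', '3')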
TTYPE_RE = re.compile(r"[0-9a-zA-Z_]+")
"""
Regular expression for valid table column names. See FITS Standard v3.0 section
7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r"(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)")
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r"\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*")
# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = "---"
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
        # This forces the data for the HDU to be read, which will replace
        # the corresponding Delayed objects in the table's Columns with
        # actual ndarrays. It will also return the value of the requested
        # data element.
return self.hdu.data[key][self.field]
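# Sketch of the lazy-read behavior (illustrative): while a table HDU's data
# has not yet been read, each column's .array holds a Delayed(hdu, field)
# placeholder; indexing one (e.g. delayed[3]) forces hdu.data to be read and
# returns row 3 of that field.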
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
        or vice versa, at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ("P", "Q"):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == "P":
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ""
else:
repeat = str(self.repeat)
return f"{repeat}{self.format}{self.option}"
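# A short illustration of the "smart comparison" described in the class
# docstring (a sketch, not executed at import):
#
#     _ColumnFormat("1J") == _ColumnFormat("J")  # True: canonical forms match
#     _ColumnFormat("1J") == "J"                 # True: plain strings coerced
#     _ColumnFormat("1J").canonical              # -> 'J'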
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can represent
    effectively any integer that will fit in a FITS column, whereas for
binary tables 'I' very explicitly refers to a 16-bit signed integer.
Conversions between the two column formats can be performed using the
``to/from_binary`` methods on this class, or the ``to/from_ascii``
    methods on the `_ColumnFormat` class. But again, not all conversions are
    possible; an impossible conversion raises a `ValueError`.
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = _parse_ascii_tformat(format, strict)
# If no width has been specified, set the dtype here to default as well
if format == self.format:
self.recformat = ASCII2NUMPY[format]
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == "L":
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ("E", "F", "D"):
return f"{self.format}{self.width}.{self.precision}"
return f"{self.format}{self.width}"
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + "u1")
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return f"{self.repeat}X"
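# Worked example (illustrative): a 13-bit X column packs into
# ((13 - 1) // 8) + 1 == 2 bytes, so the record format is a two-byte
# unsigned array and the TFORM round-trips:
#
#     x = _FormatX(13)
#     str(x)   # -> '(2,)u1'
#     x.tform  # -> '13X'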
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (
r"(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])" r"(?:\((?P<max>\d*)\))?"
)
_format_code = "P"
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = "2i4"
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group("dtype") not in FITS2NUMPY:
raise VerifyError(f"Invalid column format: {format}")
repeat = m.group("repeat")
array_dtype = m.group("dtype")
max = m.group("max")
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = "" if self.repeat is None else self.repeat
max = "" if self.max is None else self.max
return f"{repeat}{self._format_code}{self.format}({max})"
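# Worked example (illustrative): parsing a hypothetical variable-length
# TFORM of "PJ(100)" yields a 32-bit integer array descriptor whose TFORM
# round-trips:
#
#     p = _FormatP.from_tform("PJ(100)")
#     p.format, p.dtype, p.max  # -> ('J', 'i4', '100')
#     p.tform                   # -> 'PJ(100)'
#     str(p)                    # -> '2i4' (the on-disk descriptor dtype)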
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = "Q"
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = "2i8"
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
        # made more flexible in the future, for example, to support custom
# column attributes.
self._attr = "_" + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify("column_attribute_changed", obj, self._attr[1:], old_value, value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return f"{self.__class__.__name__}('{self._keyword}')"
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(
self,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
array=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Construct a `Column` by specifying attributes. All attributes
except ``format`` can be optional; see :ref:`astropy:column_creation`
and :ref:`astropy:creating_ascii_table` for more information regarding
``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
            format of the column. In the case where non-trivial ``bscale``
and/or ``bzero`` arguments are given, the values in the array must
be the *physical* values--that is, the values of column as if the
scaling has already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
"""
if format is None:
raise ValueError("Must specify format to construct Column.")
        # any of the input arguments (except array) can be a Card or just
# a number/string
kwargs = {"ascii": ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ["The following keyword arguments to Column were invalid:"]
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError("\n".join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs["recformat"]
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
        # If the column data is not an ndarray, convert it to one; i.e.
        # input arrays may be a plain list or tuple rather than an ndarray.
        # Object arrays are excluded because there is no guarantee that
        # the elements in an object array are consistent.
if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError(
f"Data is inconsistent with the format `{format}`."
)
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
def __repr__(self):
text = ""
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + " = " + repr(value) + "; "
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
        # This way each column's .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
        # Because ndarray objects are not handled by Python's garbage collector
# the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
# previously assigned each col.array to field in a FITS_rec (as in
# the example a few paragraphs above) is still used, however now
# array.setter checks if a reference cycle will be created. And if
# so, instead of saving directly to the Column's __dict__, it creates
        # the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
        # If the Column's array is not a reference to an existing FITS_rec,
        # then it is just stored in self.__dict__; otherwise check the
        # _parent_fits_rec reference if it's still available.
if "array" in self.__dict__:
return self.__dict__["array"]
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if hasattr(base, "_coldefs") and isinstance(base._coldefs, ColDefs):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if "array" in self.__dict__:
del self.__dict__["array"]
return
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self.__dict__["array"] = array
@array.deleter
def array(self):
try:
del self.__dict__["array"]
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute("TTYPE")
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but will be discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
"It is strongly recommended that column names contain only "
"upper and lower-case ASCII letters, digits, or underscores "
"for maximum compatibility with other software "
"(got {!r}).".format(name),
VerifyWarning,
)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if not isinstance(name, str) or len(str(Card("TTYPE", name))) != CARD_LENGTH:
raise AssertionError(
"Column name must be a string able to fit in a single "
"FITS card--typically this means a maximum of 68 "
"characters, though it may be fewer if the string "
"contains special characters like quotes."
)
@ColumnAttribute("TCTYP")
def coord_type(col, coord_type):
if coord_type is None:
return
if not isinstance(coord_type, str) or len(coord_type) > 8:
            raise AssertionError(
                "Coordinate/axis type must be a string of at most 8 characters."
            )
@ColumnAttribute("TCUNI")
def coord_unit(col, coord_unit):
if coord_unit is not None and not isinstance(coord_unit, str):
raise AssertionError("Coordinate/axis unit must be a string.")
@ColumnAttribute("TCRPX")
def coord_ref_point(col, coord_ref_point):
if coord_ref_point is not None and not isinstance(
coord_ref_point, numbers.Real
):
raise AssertionError(
"Pixel coordinate of the reference point must be real floating type."
)
@ColumnAttribute("TCRVL")
def coord_ref_value(col, coord_ref_value):
if coord_ref_value is not None and not isinstance(
coord_ref_value, numbers.Real
):
raise AssertionError(
"Coordinate value at reference point must be real floating type."
)
@ColumnAttribute("TCDLT")
def coord_inc(col, coord_inc):
if coord_inc is not None and not isinstance(coord_inc, numbers.Real):
raise AssertionError("Coordinate increment must be real floating type.")
@ColumnAttribute("TRPOS")
def time_ref_pos(col, time_ref_pos):
if time_ref_pos is not None and not isinstance(time_ref_pos, str):
raise AssertionError("Time reference position must be a string.")
format = ColumnAttribute("TFORM")
unit = ColumnAttribute("TUNIT")
null = ColumnAttribute("TNULL")
bscale = ColumnAttribute("TSCAL")
bzero = ColumnAttribute("TZERO")
disp = ColumnAttribute("TDISP")
start = ColumnAttribute("TBCOL")
dim = ColumnAttribute("TDIM")
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format="I") # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f"Illegal format `{format}`.")
return format, recformat
@classmethod
def _verify_keywords(
cls,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Given the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f"Column format option (TFORMn) failed verification: {err!s} "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
except AttributeError as err:
msg = (
"Column format option (TFORMn) must be a string with a valid "
f"FITS table format (got {format!s}: {err!s}). "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [
("name", name),
("unit", unit),
("bscale", bscale),
("bzero", bzero),
]:
if v is not None and v != "":
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != "":
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null)
)
else:
tnull_formats = ("B", "I", "J", "K")
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
"Column null option (TNULLn) must be an integer for "
"binary table columns (got {!r}). The invalid value "
"will be ignored for the purpose of formatting "
"the data in this column.".format(null)
)
elif not (
format.format in tnull_formats
or (
format.format in ("P", "Q") and format.p_format in tnull_formats
)
):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
"Column null option (TNULLn) is invalid for binary "
"table columns of type {!r} (got {!r}). The invalid "
"value will be ignored for the purpose of formatting "
"the data in this column.".format(format, null)
)
if msg is None:
valid["null"] = null
else:
invalid["null"] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != "":
msg = None
if not isinstance(disp, str):
msg = (
"Column disp option (TDISPn) must be a string (got "
f"{disp!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
elif isinstance(format, _AsciiColumnFormat) and disp[0].upper() == "L":
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column."
)
if msg is None:
try:
_parse_tdisp_format(disp)
valid["disp"] = disp
except VerifyError as err:
msg = (
"Column disp option (TDISPn) failed verification: "
f"{err!s} The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
invalid["disp"] = (disp, msg)
else:
invalid["disp"] = (disp, msg)
# Validate the start option
if start is not None and start != "":
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
"Column start option (TBCOLn) is not allowed for binary "
"table columns (got {!r}). The invalid keyword will be "
"ignored for the purpose of formatting the data in this "
"column.".format(start)
)
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
"Column start option (TBCOLn) must be a positive integer "
"(got {!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column.".format(start)
)
if msg is None:
valid["start"] = start
else:
invalid["start"] = (start, msg)
# Process TDIMn options
        # ASCII table columns can't have a TDIMn keyword associated with them;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != "":
msg = None
dims_tuple = tuple()
            # NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
"Column dim option (TDIMn) is not allowed for ASCII table "
"columns (got {!r}). The invalid keyword will be ignored "
"for the purpose of formatting this column.".format(dim)
)
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column."
)
if dims_tuple:
if reduce(operator.mul, dims_tuple) > format.repeat:
                    msg = (
                        "The repeat count of the column format {!r} for column {!r} "
                        "is less than the number of elements implied by the TDIM "
                        "argument {!r}. The invalid TDIMn value will be ignored "
                        "for the purpose of formatting this column.".format(
                            format, name, dim
                        )
)
if msg is None:
valid["dim"] = dims_tuple
else:
invalid["dim"] = (dim, msg)
if coord_type is not None and coord_type != "":
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type)
)
elif len(coord_type) > 8:
                msg = (
                    "Coordinate/axis type option (TCTYPn) must be a string "
                    "of at most 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type)
)
if msg is None:
valid["coord_type"] = coord_type
else:
invalid["coord_type"] = (coord_type, msg)
if coord_unit is not None and coord_unit != "":
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit)
)
if msg is None:
valid["coord_unit"] = coord_unit
else:
invalid["coord_unit"] = (coord_unit, msg)
for k, v in [
("coord_ref_point", coord_ref_point),
("coord_ref_value", coord_ref_value),
("coord_inc", coord_inc),
]:
if v is not None and v != "":
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got"
" {!r}). The invalid value will be ignored for the purpose of"
" formatting the data in this column.".format(
k, ATTRIBUTE_TO_KEYWORD[k], v
)
)
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != "":
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos)
)
if msg is None:
valid["time_ref_pos"] = time_ref_pos
else:
invalid["time_ref_pos"] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but lean toward binary table
        where ambiguous), create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
            # We're just given a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format, _AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
            raise ValueError(
                "Columns cannot have both a start (TBCOLn) and dim "
                "(TDIMn) option, since the former only applies to "
"ASCII tables, and the latter is only valid for binary "
"tables."
)
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
            # For whatever reason our guess was wrong (for example, if we got
            # just 'F' that's not a valid binary format, but it is an ASCII
            # format code, albeit with the width/precision omitted)
guess_format = (
_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat
)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
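    # For example (illustrative): Column(format="F") is not a valid binary
    # TFORM, so the fallback above retries it as an ASCII format, where a
    # bare "F" is legal and picks up the default width and precision
    # (F16.7, per ASCII_DEFAULT_WIDTHS).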
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims:
shape = dims[:-1] if "A" in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if "P" in format or "Q" in format:
return array
elif "A" in format:
if array.dtype.char in "SU":
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif "L" in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype("bool"):
return np.where(array == np.False_, ord("F"), ord("T"))
else:
return np.where(array == 0, ord("F"), ord("T"))
elif "X" in format:
return _convert_array(array, np.dtype("uint8"))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
                # to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {
2: np.uint16(2**15),
4: np.uint32(2**31),
8: np.uint64(2**63),
}
if (
array.dtype.kind == "u"
and array.dtype.itemsize in bzeros
and self.bscale in (1, None, "")
and self.bzero == bzeros[array.dtype.itemsize]
):
                    # Basically, if the array is uint, has scale == 1.0, and
                    # the bzero is the appropriate value for a pseudo-unsigned
                    # integer of the input dtype, go ahead and treat the
                    # values as unsigned
numpy_format = numpy_format.replace("i", "u")
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
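# A minimal usage sketch (illustrative; the column name is hypothetical):
# constructing a binary-table column from unsigned 32-bit data exercises the
# pseudo-unsigned convention handled above (TFORM 'J' with TZERO = 2**31):
#
#     col = Column(
#         name="COUNTS",
#         format="J",
#         bzero=2**31,
#         array=np.array([0, 1, 2**31], dtype=np.uint32),
#     )
#     col._pseudo_unsigned_ints  # -> True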
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = "\x00"
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if hasattr(input, "_columns_type") and issubclass(input._columns_type, ColDefs):
klass = input._columns_type
elif hasattr(input, "_col_format_cls") and issubclass(
input._col_format_cls, _AsciiColumnFormat
):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray`
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
"""
from .fitsrec import FITS_rec
from .hdu.table import _TableBaseHDU
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (
isinstance(input, FITS_rec)
and hasattr(input, "_coldefs")
and input._coldefs
):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError(
"Input to ColDefs must be a table HDU, a list "
"of Columns, or a record/field array."
)
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError(f"Element {idx} in the ColDefs input is not a Column.")
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
dim = array.dtype[idx].shape[::-1]
            if dim:
if "A" in format:
# should take into account multidimensional items in the column
dimel = int(re.findall("[0-9]+", str(ftype.subdtype[0]))[0])
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (dimel,) + dim
dim = "(" + ",".join(str(d) for d in dim) + ")"
else:
dim = None
# Check for unsigned ints.
bzero = None
if ftype.base.kind == "u":
if "I" in format:
bzero = np.uint16(2**15)
elif "J" in format:
bzero = np.uint32(2**31)
elif "K" in format:
bzero = np.uint64(2**63)
c = Column(
name=cname,
format=format,
array=array.view(np.ndarray)[cname],
bzero=bzero,
dim=dim,
)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr["TFIELDS"]
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
col_keywords = [{} for i in range(nfields)]
for keyword in hdr:
key = TDEF_RE.match(keyword)
try:
label = key.group("label")
except Exception:
continue # skip if there is no match
if label in KEYWORD_NAMES:
col = int(key.group("num"))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[label]
value = hdr[keyword]
if attr == "format":
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
f"Invalid keyword for column {idx + 1}: {val[1]}", VerifyWarning
)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs["recformat"]
if "dim" in valid_kwargs:
valid_kwargs["dim"] = kwargs["dim"]
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]["array"] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
        # Add the table HDU as a listener for changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(column.format)
# Handle a few special cases of column format options that are not
        # compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if new_column.disp is not None and new_column.disp.upper().startswith("L"):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
Implements for example self.units, self.formats, etc.
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == "s":
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else "")
return attr
raise AttributeError(name)
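# Illustrative sketch (not part of the API contract): given a hypothetical
# ``coldefs`` built from Column(name='a', format='E', unit='Jy') and
# Column(name='b', format='J'), the plural attribute access implemented
# above behaves roughly like:
#
#     >>> coldefs.units
#     ['Jy', '']
#
# with '' standing in for columns that leave the keyword unset.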
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim:
if format_.format == "A":
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({"names": self.names, "formats": formats, "offsets": offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = "ColDefs("
if hasattr(self, "columns") and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += "\n "
rep += "\n ".join([repr(c) for c in self.columns])
rep += "\n"
rep += ")"
return rep
def __add__(self, other, option="left"):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError("Wrong type of input.")
if option == "left":
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, "right")
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _update_column_attribute_changed(self, column, attr, old_value, new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == "name":
del self.names
elif attr == "format":
del self.formats
self._notify(
"column_attribute_changed", column, idx, attr, old_value, new_value
)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
raise AssertionError
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify("column_added", self, column)
return self
def del_col(self, col_name):
"""
Delete (the definition of) one `Column`.
Parameters
----------
col_name : str or int
The column's name or index
"""
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify("column_removed", self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError(f"New name {new_name} already exists.")
else:
self.change_attrib(col_name, "name", new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, "unit", new_unit)
def info(self, attrib="all", output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file-like, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ["all", ""]:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(",")
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == "s":
lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write(
"'{}' is not an attribute of the column definitions.\n".format(
attr
)
)
continue
output.write(f"{attr}:\n")
output.write(f" {getattr(self, attr + 's')}\n")
else:
ret[attr] = getattr(self, attr + "s")
if not output:
return ret
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = " "
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = "S" + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
# Widths is the width of each field *including* any space between
# fields; this is so that we can map the fields to string records in a
# Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ["a" + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype="a"):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == "a":
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(f"Inconsistent input data array: {input}")
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a, dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
To make sure the new item has consistent data type to avoid
misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == "a":
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
self.max = max(self.max, len(value))
def tolist(self):
return [list(item) for item in super().tolist()]
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
# try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx
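# Hedged usage sketch for _get_index (illustration only; the names here are
# hypothetical). Exact matches win over case-insensitive ones, and an
# ambiguous case-insensitive match raises KeyError:
#
#     >>> _get_index(['abc', 'ABC'], 'abc')   # exact match
#     0
#     >>> _get_index(['abc', 'DEF'], 'Def')   # unique case-insensitive match
#     1
#     >>> _get_index(['abc', 'ABC'], 'Abc')   # ambiguous
#     Traceback (most recent call last):
#     ...
#     KeyError: "Ambiguous key name 'Abc'."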
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
input ``Uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype="uint8")
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
Wrap the X format column Boolean array into an ``UInt8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
output ``Uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
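# Round-trip sketch for the X-format helpers above (illustration only).
# With repeat=10 the bits are packed MSB-first into ceil(10/8) = 2 bytes,
# and the 6 unused bits of the last byte end up at the bottom, so they are
# shifted out:
#
#     >>> bits = np.array([[1, 0, 1, 0, 0, 0, 0, 0, 1, 1]], dtype=bool)
#     >>> packed = np.zeros((1, 2), dtype=np.uint8)
#     >>> _wrapx(bits, packed, 10)
#     >>> packed                      # 0b10100000, 0b11 << 6
#     array([[160, 192]], dtype=uint8)
#     >>> unpacked = np.zeros((1, 10), dtype=bool)
#     >>> _unwrapx(packed, unpacked, 10)
#     >>> bool((unpacked == bits).all())
#     True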
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == "a":
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == "a":
rowval = " " * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == "a":
data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
descr_output[idx, 0] = len(data_output[idx])
descr_output[idx, 1] = _offset
_offset += len(data_output[idx]) * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
# TODO: Maybe catch this error and use a default type (bytes, maybe?)
# for unrecognized column types. As long as we can determine the
# correct byte width somehow...
raise VerifyError(f"Format {tform!r} is not recognized.")
if repeat == "":
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
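# Hedged sketch of _parse_tformat behavior (illustration only), assuming
# TFORMAT_RE follows the standard [repeat]format[option] layout:
#
#     >>> _parse_tformat('10A')
#     (10, 'A', '')
#     >>> _parse_tformat('E')
#     (1, 'E', '')
#     >>> _parse_tformat('PE(100)')
#     (1, 'P', 'E(100)')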
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f"Format {tform!r} is not recognized.")
# Be flexible on case
format = match.group("format")
if format is None:
# Floating point format
format = match.group("formatf").upper()
width = match.group("widthf")
precision = match.group("precision")
if width is None or precision is None:
if strict:
raise VerifyError(
f"Format {tform!r} is not unambiguously an ASCII table format."
)
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group("width")
if width is None:
if strict:
raise VerifyError(
f"Format {tform!r} is not unambiguously an ASCII table format."
)
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = (
"Format {!r} is not valid--field width and decimal precision "
"must be integers."
)
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError(
"Format {!r} not valid--field width must be a positive integeter.".format(
tform
)
)
if precision >= width:
raise VerifyError(
"Format {!r} not valid--the number of decimal digits "
"must be less than the format's total "
"width {}.".format(tform, width)
)
return format, width, precision
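# Hedged sketch (illustration only): the floating-point branch keeps both
# width and precision, while integer/character formats get precision 0:
#
#     >>> _parse_ascii_tformat('F10.4')
#     ('F', 10, 4)
#     >>> _parse_ascii_tformat('I6')
#     ('I', 6, 0)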
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
the value ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group("dims")
return tuple(int(d.strip()) for d in dims.split(","))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return tuple()
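# Hedged sketch (illustration only): note the reversal from the FITS
# column-major TDIM convention to the C/numpy row-major order:
#
#     >>> _parse_tdim('(2,3)')
#     (3, 2)
#     >>> _parse_tdim('not-a-dim')
#     ()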
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == "a" and f2[0] == "a":
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == "A":
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == "A" and option != "":
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ""
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == "X":
output_format = _FormatX(repeat)
elif dtype == "P":
output_format = _FormatP.from_tform(format)
elif dtype == "Q":
output_format = _FormatQ.from_tform(format)
elif dtype == "F":
output_format = "f8"
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == "U" or (
dtype.subdtype is not None and dtype.subdtype[0].char == "U"
):
# Unicode dtype--itemsize is 4 times actual ASCII character length,
# which is what matters for FITS column formats
# Use dtype.base and dtype.subdtype --dtype for multi-dimensional items
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype="i8").prod()
if nel > 1:
repeat = nel
if kind == "a":
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + "A"
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ""
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ("U", "S"):
recformat = kind = "a"
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
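# Hedged round-trip sketch (illustration only), assuming the standard
# FITS2NUMPY/NUMPY2FITS mappings (e.g. 'J' <-> 'i4'):
#
#     >>> _convert_format('J')
#     'i4'
#     >>> _convert_format('i4', reverse=True)
#     'J'
#     >>> _convert_format('10A')
#     'a10'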
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == "a":
return "A" + str(itemsize)
elif NUMPY2FITS.get(recformat) == "L":
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return "A1"
elif kind == "i":
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS["I"][0])
return "I" + str(width)
elif kind == "f":
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = "D"
else:
format = "E"
width = ".".join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
# For integers, if the width <= 4 we can safely use 16-bit ints for all
# values; if width >= 10 we may need to accommodate 64-bit int values
# (for the non-standard J format code just always force 64-bit)
if format == "I":
if width <= 4:
recformat = "i2"
elif width > 9:
recformat = "i8"
elif format == "A":
recformat += str(width)
return recformat
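# Hedged sketch (illustration only) of the width-based narrowing described
# above: an I-format column of width <= 4 can always fit in 16-bit ints, so
# with the ASCII2NUMPY defaults one would expect roughly:
#
#     >>> _convert_ascii_format('I4')
#     'i2'
#
# The reverse direction picks D for 8-byte floats and E for narrower ones,
# with the package's default widths appended.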
def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc : str
The format characters from TDISPn
width : str
The width value from TDISPn
precision : str
The precision value from TDISPn
exponential : str
The exponential value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = (
tdisp[0]
if tdisp[0] != "E" or (len(tdisp) > 1 and tdisp[1] not in "NS")
else tdisp[:2]
)
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f"Format {tdisp} is not recognized.")
match = tdisp_re.match(tdisp.strip())
if not match or match.group("formatc") is None:
raise VerifyError(f"Format {tdisp} is not recognized.")
formatc = match.group("formatc")
width = match.group("width")
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ("I", "B", "O", "Z", "F", "E", "G", "D"):
precision = match.group("precision")
if precision is None:
precision = 1
if tdisp[0] in ("E", "D", "G") and tdisp[1] not in ("N", "S"):
exponential = match.group("exponential")
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
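# Hedged sketch (illustration only): width and precision come back as the
# raw strings captured by the regex, so one would expect roughly:
#
#     >>> _parse_tdisp_format('F8.3')
#     ('F', '8', '3', None)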
def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
See the format_type definitions above the TDISP_FMT_DICT. If codes is
changed to take advantage of the exponential specification, will need to
add it as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f"Format {format_type} is not recognized.")
def python_to_tdisp(format_string, logical_dtype=False):
"""
Turn a Python format string into a TDISPn FITS compliant format string. Not
all formats convert; those that do not will cause a Warning and return None.
Parameters
----------
format_string : str
TDISPn FITS Header keyword. Used to specify display formatting.
logical_dtype : bool
True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
tdisp_string : str
The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {
"a": "A",
"s": "A",
"d": "I",
"b": "B",
"o": "O",
"x": "Z",
"X": "Z",
"f": "F",
"F": "F",
"g": "G",
"G": "G",
"e": "E",
"E": "E",
}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == "{" and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip("}")
elif format_string[0] == "%":
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = "", ""
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == ">" and fmt_str[1] != "0":
ftype = fmt_to_tdisp["a"]
width = fmt_str[1:]
elif fmt_str[-1] == "s" and fmt_str != "s":
ftype = fmt_to_tdisp["a"]
width = fmt_str[:-1].lstrip("0")
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != "0":
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if "." in fmt_str:
width, precision = fmt_str.split(".")
sep = "."
if width == "":
ascii_key = ftype if ftype != "G" else "F"
width = str(
int(precision)
+ (
ASCII_DEFAULT_WIDTHS[ascii_key][0]
- ASCII_DEFAULT_WIDTHS[ascii_key][1]
)
)
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn(
"Format {} cannot be mapped to the accepted "
"TDISPn keyword values. Format will not be "
"moved into TDISPn keyword.".format(format_string),
AstropyUserWarning,
)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = "L"
return ftype + width + sep + precision
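# Hedged sketch (illustration only): the inverse of
# _fortran_to_python_format for the formats that do convert:
#
#     >>> python_to_tdisp('{:8.3f}')
#     'F8.3'
#     >>> python_to_tdisp('%12.5e')
#     'E12.5'
#     >>> python_to_tdisp('{}') is None
#     True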
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import itertools
import numbers
import os
import re
import warnings
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
from ._utils import parse_header
from .card import KEYWORD_LENGTH, UNDEFINED, Card, _pad
from .file import _File
from .util import (
decode_ascii,
encode_ascii,
fileobj_closed,
fileobj_is_binary,
path_like,
)
BLOCK_SIZE = 2880 # the FITS block size
# This regular expression can match a *valid* END card which just consists of
# the string 'END' followed by all spaces, or an *invalid* end card which
# consists of END, followed by any character that is *not* a valid character
# for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which
# starts with 'END' but is not 'END'), followed by any arbitrary bytes. An
# invalid end card may also consist of just 'END' with no trailing bytes.
HEADER_END_RE = re.compile(
encode_ascii(r"(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])")
)
# According to the FITS standard the only characters that may appear in a
# header record are the restricted ASCII chars from 0x20 through 0x7E.
VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F)))
END_CARD = "END" + " " * 77
__doctest_skip__ = [
"Header",
"Header.comments",
"Header.fromtextfile",
"Header.totextfile",
"Header.set",
"Header.update",
]
class Header:
"""
FITS header class. This class exposes both a dict-like interface and a
list-like interface to FITS headers.
The header may be indexed by keyword and, like a dict, the associated value
will be returned. When the header contains cards with duplicate keywords,
only the value of the first card with the given keyword will be returned.
It is also possible to use a 2-tuple as the index in the form (keyword,
n)--this returns the n-th value with that keyword, in the case where there
are duplicate keywords.
For example::
>>> header['NAXIS']
0
>>> header[('FOO', 1)] # Return the value of the second FOO keyword
'foo'
The header may also be indexed by card number::
>>> header[0] # Return the value of the first card in the header
'T'
Commentary keywords such as HISTORY and COMMENT are special cases: When
indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all
the HISTORY/COMMENT values is returned::
>>> header['HISTORY']
This is the first history entry in this header.
This is the second history entry in this header.
...
See the Astropy documentation for more details on working with headers.
Notes
-----
Although FITS keywords must be exclusively upper case, retrieving an item
in a `Header` object is case insensitive.
"""
def __init__(self, cards=[], copy=False):
"""
Construct a `Header` from an iterable and/or text file.
Parameters
----------
cards : list of `Card`, optional
The cards to initialize the header with. Also allowed are other
`Header` (or `dict`-like) objects.
.. versionchanged:: 1.2
Allowed ``cards`` to be a `dict`-like object.
copy : bool, optional
If ``True`` copies the ``cards`` if they were another `Header`
instance.
Default is ``False``.
.. versionadded:: 1.3
"""
self.clear()
if isinstance(cards, Header):
if copy:
cards = cards.copy()
cards = cards.cards
elif isinstance(cards, dict):
cards = cards.items()
for card in cards:
self.append(card, end=True)
self._modified = False
def __len__(self):
return len(self._cards)
def __iter__(self):
for card in self._cards:
yield card.keyword
def __contains__(self, keyword):
if keyword in self._keyword_indices or keyword in self._rvkc_indices:
# For the most common case (single, standard form keyword lookup)
# this will work and is an O(1) check. If it fails that doesn't
# guarantee absence, just that we have to perform the full set of
# checks in self._cardindex
return True
try:
self._cardindex(keyword)
except (KeyError, IndexError):
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
return self.__class__([copy.copy(c) for c in self._cards[key]])
elif self._haswildcard(key):
return self.__class__(
[copy.copy(self._cards[idx]) for idx in self._wildcardmatch(key)]
)
elif isinstance(key, str):
key = key.strip()
if key.upper() in Card._commentary_keywords:
key = key.upper()
# Special case for commentary cards
return _HeaderCommentaryCards(self, key)
if isinstance(key, tuple):
keyword = key[0]
else:
keyword = key
card = self._cards[self._cardindex(key)]
if card.field_specifier is not None and keyword == card.rawkeyword:
# This is RVKC; if only the top-level keyword was specified return
# the raw value, not the parsed out float value
return card.rawvalue
value = card.value
if value == UNDEFINED:
return None
return value
def __setitem__(self, key, value):
if self._set_slice(key, value, self):
return
if isinstance(value, tuple):
if len(value) > 2:
raise ValueError(
"A Header item may be set with either a scalar value, "
"a 1-tuple containing a scalar value, or a 2-tuple "
"containing a scalar value and comment string."
)
if len(value) == 1:
value, comment = value[0], None
if value is None:
value = UNDEFINED
elif len(value) == 2:
value, comment = value
if value is None:
value = UNDEFINED
if comment is None:
comment = ""
else:
comment = None
card = None
if isinstance(key, numbers.Integral):
card = self._cards[key]
elif isinstance(key, tuple):
card = self._cards[self._cardindex(key)]
if value is None:
value = UNDEFINED
if card:
card.value = value
if comment is not None:
card.comment = comment
if card._modified:
self._modified = True
else:
# If we get an IndexError that should be raised; we don't allow
# assignment to non-existing indices
self._update((key, value, comment))
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# This is very inefficient but it's not a commonly used feature.
# If someone out there complains that they make heavy use of slice
# deletions and it's too slow, well, we can worry about it then
# [the solution is not too complicated--it would be wait 'til all
# the cards are deleted before updating _keyword_indices rather
# than updating it once for each card that gets deleted]
if isinstance(key, slice):
indices = range(*key.indices(len(self)))
# If the slice step is backwards we want to reverse it, because
# it will be reversed in a few lines...
if key.step and key.step < 0:
indices = reversed(indices)
else:
indices = self._wildcardmatch(key)
for idx in reversed(indices):
del self[idx]
return
elif isinstance(key, str):
# delete ALL cards with the same keyword name
key = Card.normalize_keyword(key)
indices = self._keyword_indices
if key not in self._keyword_indices:
indices = self._rvkc_indices
if key not in indices:
# if the keyword is not present raise KeyError.
# To delete a keyword without caring whether it is present, use
# Header.remove(keyword, ignore_missing=True)
raise KeyError(f"Keyword '{key}' not found.")
for idx in reversed(indices[key]):
# Have to copy the indices list since it will be modified below
del self[idx]
return
idx = self._cardindex(key)
card = self._cards[idx]
keyword = card.keyword
del self._cards[idx]
keyword = Card.normalize_keyword(keyword)
indices = self._keyword_indices[keyword]
indices.remove(idx)
if not indices:
del self._keyword_indices[keyword]
# Also update RVKC indices if necessary :/
if card.field_specifier is not None:
indices = self._rvkc_indices[card.rawkeyword]
indices.remove(idx)
if not indices:
del self._rvkc_indices[card.rawkeyword]
# We also need to update all other indices
self._updateindices(idx, increment=False)
self._modified = True
def __repr__(self):
return self.tostring(sep="\n", endcard=False, padding=False)
def __str__(self):
return self.tostring()
def __eq__(self, other):
"""
Two Headers are equal only if they have the exact same string
representation.
"""
return str(self) == str(other)
def __add__(self, other):
temp = self.copy(strip=False)
temp.extend(other)
return temp
def __iadd__(self, other):
self.extend(other)
return self
def _ipython_key_completions_(self):
return self.__iter__()
@property
def cards(self):
"""
The underlying physical cards that make up this Header; it can be
looked at, but it should not be modified directly.
"""
return _CardAccessor(self)
@property
def comments(self):
"""
View the comments associated with each keyword, if any.
For example, to see the comment on the NAXIS keyword:
>>> header.comments['NAXIS']
number of data axes
Comments can also be updated through this interface:
>>> header.comments['NAXIS'] = 'Number of data axes'
"""
return _HeaderComments(self)
@property
def _modified(self):
"""
Whether or not the header has been modified; this is a property so that
it can also check each card for modifications--cards may have been
modified directly without the header containing it otherwise knowing.
"""
modified_cards = any(c._modified for c in self._cards)
if modified_cards:
# If any cards were modified then by definition the header was
# modified
self.__dict__["_modified"] = True
return self.__dict__["_modified"]
@_modified.setter
def _modified(self, val):
self.__dict__["_modified"] = val
@classmethod
def fromstring(cls, data, sep=""):
"""
Creates an HDU header from a byte string containing the entire header
data.
Parameters
----------
data : str or bytes
String or bytes containing the entire header. In the case of bytes
they will be decoded using latin-1 (only plain ASCII characters are
allowed in FITS headers but latin-1 allows us to retain any invalid
bytes that might appear in malformatted FITS files).
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file). In general this is only used in cases where a header was
printed as text (e.g. with newlines after each card) and you want
to create a new `Header` from it by copy/pasting.
Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'SIMPLE': True})
>>> Header.fromstring(hdr.tostring()) == hdr
True
If you want to create a `Header` from printed text it's not necessary
to have the exact binary structure as it would appear in a FITS file,
with the full 80 byte card length. Rather, each "card" can end in a
newline and does not have to be padded out to a full card length as
long as it "looks like" a FITS header:
>>> hdr = Header.fromstring(\"\"\"\\
... SIMPLE = T / conforms to FITS standard
... BITPIX = 8 / array data type
... NAXIS = 0 / number of array dimensions
... EXTEND = T
... \"\"\", sep='\\n')
>>> hdr['SIMPLE']
True
>>> hdr['BITPIX']
8
>>> len(hdr)
4
Returns
-------
`Header`
A new `Header` instance.
"""
cards = []
# If the card separator contains characters that may validly appear in
# a card, the only way to unambiguously distinguish between cards is to
# require that they be Card.length long. However, if the separator
# contains non-valid characters (namely \n) the cards may be split
# immediately at the separator
require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS)
if isinstance(data, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place--accepting it here still gives us the
# opportunity to display warnings later during validation
CONTINUE = b"CONTINUE"
END = b"END"
end_card = END_CARD.encode("ascii")
sep = sep.encode("latin1")
empty = b""
else:
CONTINUE = "CONTINUE"
END = "END"
end_card = END_CARD
empty = ""
# Split the header into individual cards
idx = 0
image = []
while idx < len(data):
if require_full_cardlength:
end_idx = idx + Card.length
else:
try:
end_idx = data.index(sep, idx)
except ValueError:
end_idx = len(data)
next_image = data[idx:end_idx]
idx = end_idx + len(sep)
if image:
if next_image[:8] == CONTINUE:
image.append(next_image)
continue
cards.append(Card.fromstring(empty.join(image)))
if require_full_cardlength:
if next_image == end_card:
image = []
break
else:
if next_image.split(sep)[0].rstrip() == END:
image = []
break
image = [next_image]
# Add the last image that was found before the end, if any
if image:
cards.append(Card.fromstring(empty.join(image)))
return cls._fromcards(cards)
@classmethod
def fromfile(cls, fileobj, sep="", endcard=True, padding=True):
"""
Similar to :meth:`Header.fromstring`, but reads the header string from
a given file-like object or filename.
Parameters
----------
fileobj : str, file-like
A filename or an open file-like object from which a FITS header is
to be read. For open file handles the file pointer must be at the
beginning of the header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
endcard : bool, optional
If True (the default) the header must end with an END card in order
to be considered valid. If an END card is not found an
`OSError` is raised.
padding : bool, optional
If True (the default) the header will be required to be padded out
to a multiple of 2880, the FITS header block size. Otherwise any
padding, or lack thereof, is ignored.
Returns
-------
`Header`
A new `Header` instance.
"""
close_file = False
if isinstance(fileobj, path_like):
# If sep is non-empty we are trying to read a header printed to a
# text file, so open in text mode by default to support newline
# handling; if a binary-mode file object is passed in, the user is
# then on their own w.r.t. newline handling.
#
# Otherwise assume we are reading from an actual FITS file and open
# in binary mode.
fileobj = os.path.expanduser(fileobj)
if sep:
fileobj = open(fileobj, encoding="latin1")
else:
fileobj = open(fileobj, "rb")
close_file = True
try:
is_binary = fileobj_is_binary(fileobj)
def block_iter(nbytes):
while True:
data = fileobj.read(nbytes)
if data:
yield data
else:
break
return cls._from_blocks(block_iter, is_binary, sep, endcard, padding)[1]
finally:
if close_file:
fileobj.close()
@classmethod
def _fromcards(cls, cards):
header = cls()
for idx, card in enumerate(cards):
header._cards.append(card)
keyword = Card.normalize_keyword(card.keyword)
header._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
header._rvkc_indices[card.rawkeyword].append(idx)
header._modified = False
return header
@classmethod
def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding):
"""
The meat of `Header.fromfile`; in a separate method so that
`Header.fromfile` itself is just responsible for wrapping file
handling. Also used by `_BaseHDU.fromstring`.
``block_iter`` should be a callable which, given a block size n
(typically 2880 bytes as used by the FITS standard) returns an iterator
of byte strings of that block size.
``is_binary`` specifies whether the returned blocks are bytes or text
Returns both the entire header *string*, and the `Header` object
returned by Header.fromstring on that string.
"""
actual_block_size = _block_size(sep)
clen = Card.length + len(sep)
blocks = block_iter(actual_block_size)
# Read the first header block.
try:
block = next(blocks)
except StopIteration:
raise EOFError()
if not is_binary:
# TODO: There needs to be error handling at *this* level for
# non-ASCII characters; maybe at this stage decoding latin-1 might
# be safer
block = encode_ascii(block)
read_blocks = []
is_eof = False
end_found = False
# continue reading header blocks until END card or EOF is reached
while True:
# find the END card
end_found, block = cls._find_end_card(block, clen)
read_blocks.append(decode_ascii(block))
if end_found:
break
try:
block = next(blocks)
except StopIteration:
is_eof = True
break
if not block:
is_eof = True
break
if not is_binary:
block = encode_ascii(block)
header_str = "".join(read_blocks)
_check_padding(header_str, actual_block_size, is_eof, check_block_size=padding)
if not end_found and is_eof and endcard:
# TODO: Pass this error to validation framework as an ERROR,
# rather than raising an exception
raise OSError("Header missing END card.")
return header_str, cls.fromstring(header_str, sep=sep)
@classmethod
def _find_end_card(cls, block, card_len):
"""
Utility method to search a header block for the END card and handle
invalid END cards.
This method can also return a modified copy of the input header block
in case an invalid end card needs to be sanitized.
"""
for mo in HEADER_END_RE.finditer(block):
# Ensure the END card was found, and it started on the
# boundary of a new card (see ticket #142)
if mo.start() % card_len != 0:
continue
# This must be the last header block, otherwise the
# file is malformatted
if mo.group("invalid"):
offset = mo.start()
trailing = block[offset + 3 : offset + card_len - 3].rstrip()
if trailing:
trailing = repr(trailing).lstrip("ub")
# TODO: Pass this warning up to the validation framework
warnings.warn(
"Unexpected bytes trailing END keyword: {}; these "
"bytes will be replaced with spaces on write.".format(trailing),
AstropyUserWarning,
)
else:
# TODO: Pass this warning up to the validation framework
warnings.warn(
"Missing padding to end of the FITS block after the "
"END keyword; additional spaces will be appended to "
"the file upon writing to pad out to {} "
"bytes.".format(BLOCK_SIZE),
AstropyUserWarning,
)
# Sanitize out invalid END card now that the appropriate
# warnings have been issued
block = (
block[:offset]
+ encode_ascii(END_CARD)
+ block[offset + len(END_CARD) :]
)
return True, block
return False, block
def tostring(self, sep="", endcard=True, padding=True):
r"""
Returns a string representation of the header.
By default this uses no separator between cards, adds the END card, and
pads the string with spaces to the next multiple of 2880 bytes. That
is, it returns the header exactly as it would appear in a FITS file.
Parameters
----------
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If True (default) adds the END card to the end of the header
string
padding : bool, optional
If True (default) pads the string with spaces out to the next
multiple of 2880 characters
Returns
-------
str
A string representing a FITS header.
"""
lines = []
for card in self._cards:
s = str(card)
# Cards with CONTINUE cards may be longer than 80 chars; so break
# them into multiple lines
while s:
lines.append(s[: Card.length])
s = s[Card.length :]
s = sep.join(lines)
if endcard:
s += sep + _pad("END")
if padding:
s += " " * _pad_length(len(s))
return s
def tofile(self, fileobj, sep="", endcard=True, padding=True, overwrite=False):
r"""
Writes the header to file or file-like object.
By default this writes the header exactly as it would be written to a
FITS file, with the END card included and padding to the next multiple
of 2880 bytes. However, aspects of this may be controlled.
Parameters
----------
fileobj : path-like or file-like, optional
Either the pathname of a file, or an open file handle or file-like
object.
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If `True` (default) adds the END card to the end of the header
string
padding : bool, optional
If `True` (default) pads the string with spaces out to the next
multiple of 2880 characters
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
"""
close_file = fileobj_closed(fileobj)
if not isinstance(fileobj, _File):
fileobj = _File(fileobj, mode="ostream", overwrite=overwrite)
try:
blocks = self.tostring(sep=sep, endcard=endcard, padding=padding)
actual_block_size = _block_size(sep)
if padding and len(blocks) % actual_block_size != 0:
raise OSError(
"Header size ({}) is not a multiple of block size ({}).".format(
len(blocks) - actual_block_size + BLOCK_SIZE, BLOCK_SIZE
)
)
fileobj.flush()
fileobj.write(blocks.encode("ascii"))
fileobj.flush()
finally:
if close_file:
fileobj.close()
@classmethod
def fromtextfile(cls, fileobj, endcard=False):
"""
Read a header from a simple text file or file-like object.
Equivalent to::
>>> Header.fromfile(fileobj, sep='\\n', endcard=False,
... padding=False)
See Also
--------
fromfile
"""
return cls.fromfile(fileobj, sep="\n", endcard=endcard, padding=False)
def totextfile(self, fileobj, endcard=False, overwrite=False):
"""
Write the header as text to a file or a file-like object.
Equivalent to::
>>> Header.tofile(fileobj, sep='\\n', endcard=False,
... padding=False, overwrite=overwrite)
See Also
--------
tofile
"""
self.tofile(
fileobj, sep="\n", endcard=endcard, padding=False, overwrite=overwrite
)
def clear(self):
"""
Remove all cards from the header.
"""
self._cards = []
self._keyword_indices = collections.defaultdict(list)
self._rvkc_indices = collections.defaultdict(list)
def copy(self, strip=False):
"""
Make a copy of the :class:`Header`.
.. versionchanged:: 1.3
`copy.copy` and `copy.deepcopy` on a `Header` will call this
method.
Parameters
----------
strip : bool, optional
If `True`, strip any headers that are specific to one of the
standard HDU types, so that this header can be used in a different
HDU.
Returns
-------
`Header`
A new :class:`Header` instance.
"""
tmp = self.__class__(copy.copy(card) for card in self._cards)
if strip:
tmp.strip()
return tmp
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
@classmethod
def fromkeys(cls, iterable, value=None):
"""
Similar to :meth:`dict.fromkeys`--creates a new `Header` from an
iterable of keywords and an optional default value.
This method is not likely to be particularly useful for creating real
world FITS headers, but it is useful for testing.
Parameters
----------
iterable
Any iterable that returns strings representing FITS keywords.
value : optional
A default value to assign to each keyword; must be a valid type for
FITS keywords.
Returns
-------
`Header`
A new `Header` instance.
"""
d = cls()
if not isinstance(value, tuple):
value = (value,)
for key in iterable:
d.append((key,) + value)
return d
def get(self, key, default=None):
"""
Similar to :meth:`dict.get`--returns the value associated with keyword
in the header, or a default value if the keyword is not found.
Parameters
----------
key : str
A keyword that may or may not be in the header.
default : optional
A default value to return if the keyword is not found in the
header.
Returns
-------
value: str, number, complex, bool, or ``astropy.io.fits.card.Undefined``
The value associated with the given keyword, or the default value
if the keyword is not in the header.
"""
try:
return self[key]
except (KeyError, IndexError):
return default
def set(self, keyword, value=None, comment=None, before=None, after=None):
"""
Set the value and/or comment and/or position of a specified keyword.
If the keyword does not already exist in the header, a new keyword is
created in the specified position, or appended to the end of the header
if no position is specified.
This method is similar to :meth:`Header.update` prior to Astropy v0.1.
.. note::
It should be noted that ``header.set(keyword, value)`` and
``header.set(keyword, value, comment)`` are equivalent to
``header[keyword] = value`` and
``header[keyword] = (value, comment)`` respectively.
New keywords can also be inserted relative to existing keywords
using, for example::
>>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
to insert before an existing keyword, or::
>>> header.insert('NAXIS', ('NAXIS1', 4096), after=True)
to insert after an existing keyword.
The only advantage of using :meth:`Header.set` is that it
easily replaces the old usage of :meth:`Header.update` both
conceptually and in terms of function signature.
Parameters
----------
keyword : str
A header keyword
value : str, optional
The value to set for the given keyword; if None the existing value
is kept, but '' may be used to set a blank value
comment : str, optional
The comment to set for the given keyword; if None the existing
comment is kept, but ``''`` may be used to set a blank comment
before : str, int, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument ``before`` takes
precedence over ``after`` if both specified.
after : str, int, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
"""
# Create a temporary card that looks like the one being set; if the
# temporary card turns out to be a RVKC this will make it easier to
# deal with the idiosyncrasies thereof
# Don't try to make a temporary card though if the keyword looks like
# it might be a HIERARCH card or is otherwise invalid--this step is
# only for validating RVKCs.
if (
len(keyword) <= KEYWORD_LENGTH
and Card._keywd_FSC_RE.match(keyword)
and keyword not in self._keyword_indices
):
new_card = Card(keyword, value, comment)
new_keyword = new_card.keyword
else:
new_keyword = keyword
if new_keyword not in Card._commentary_keywords and new_keyword in self:
if comment is None:
comment = self.comments[keyword]
if value is None:
value = self[keyword]
self[keyword] = (value, comment)
if before is not None or after is not None:
card = self._cards[self._cardindex(keyword)]
self._relativeinsert(card, before=before, after=after, replace=True)
elif before is not None or after is not None:
self._relativeinsert((keyword, value, comment), before=before, after=after)
else:
self[keyword] = (value, comment)
def items(self):
"""Like :meth:`dict.items`."""
for card in self._cards:
yield card.keyword, None if card.value == UNDEFINED else card.value
def keys(self):
"""
Like :meth:`dict.keys`--iterating directly over the `Header`
instance has the same behavior.
"""
for card in self._cards:
yield card.keyword
def values(self):
"""Like :meth:`dict.values`."""
for card in self._cards:
yield None if card.value == UNDEFINED else card.value
def pop(self, *args):
"""
Works like :meth:`list.pop` if no arguments or an index argument are
supplied; otherwise works like :meth:`dict.pop`.
"""
if len(args) > 2:
raise TypeError(f"Header.pop expected at most 2 arguments, got {len(args)}")
if len(args) == 0:
key = -1
else:
key = args[0]
try:
value = self[key]
except (KeyError, IndexError):
if len(args) == 2:
return args[1]
raise
del self[key]
return value
def popitem(self):
"""Similar to :meth:`dict.popitem`."""
try:
k, v = next(self.items())
except StopIteration:
raise KeyError("Header is empty")
del self[k]
return k, v
def setdefault(self, key, default=None):
"""Similar to :meth:`dict.setdefault`."""
try:
return self[key]
except (KeyError, IndexError):
self[key] = default
return default
def update(self, *args, **kwargs):
"""
Update the Header with new keyword values, updating the values of
existing keywords and appending new keywords otherwise; similar to
`dict.update`.
`update` accepts either a dict-like object or an iterable. In the
former case the keys must be header keywords and the values may be
either scalar values or (value, comment) tuples. In the case of an
iterable the items must be (keyword, value) tuples or (keyword, value,
comment) tuples.
Arbitrary arguments are also accepted, in which case the update() is
called again with the kwargs dict as its only argument. That is,
::
>>> header.update(NAXIS1=100, NAXIS2=100)
is equivalent to::
header.update({'NAXIS1': 100, 'NAXIS2': 100})
.. warning::
As this method works similarly to `dict.update` it is very
different from the ``Header.update()`` method in Astropy v0.1.
Use of the old API was
**deprecated** for a long time and is now removed. Most uses of the
old API can be replaced as follows:
* Replace ::
header.update(keyword, value)
with ::
header[keyword] = value
* Replace ::
header.update(keyword, value, comment=comment)
with ::
header[keyword] = (value, comment)
* Replace ::
header.update(keyword, value, before=before_keyword)
with ::
header.insert(before_keyword, (keyword, value))
* Replace ::
header.update(keyword, value, after=after_keyword)
with ::
header.insert(after_keyword, (keyword, value),
after=True)
See also :meth:`Header.set` which is a new method that provides an
interface similar to the old ``Header.update()`` and may help make
transition a little easier.
"""
if args:
other = args[0]
else:
other = None
def update_from_dict(k, v):
if not isinstance(v, tuple):
card = Card(k, v)
elif 0 < len(v) <= 2:
card = Card(*((k,) + v))
else:
raise ValueError(
"Header update value for key %r is invalid; the "
"value must be either a scalar, a 1-tuple "
"containing the scalar value, or a 2-tuple "
"containing the value and a comment string." % k
)
self._update(card)
if other is None:
pass
elif isinstance(other, Header):
for card in other.cards:
self._update(card)
elif hasattr(other, "items"):
for k, v in other.items():
update_from_dict(k, v)
elif hasattr(other, "keys"):
for k in other.keys():
update_from_dict(k, other[k])
else:
for idx, card in enumerate(other):
if isinstance(card, Card):
self._update(card)
elif isinstance(card, tuple) and (1 < len(card) <= 3):
self._update(Card(*card))
else:
raise ValueError(
"Header update sequence item #{} is invalid; "
"the item must either be a 2-tuple containing "
"a keyword and value, or a 3-tuple containing "
"a keyword, value, and comment string.".format(idx)
)
if kwargs:
self.update(kwargs)
def append(self, card=None, useblanks=True, bottom=False, end=False):
"""
Appends a new keyword+value card to the end of the Header, similar
to `list.append`.
By default if the last cards in the Header have commentary keywords,
this will append the new keyword before the commentary (unless the new
keyword is also commentary).
Also differs from `list.append` in that it can be called with no
arguments: In this case a blank card is appended to the end of the
Header. In that case all the keyword arguments are ignored.
Parameters
----------
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple representing a
single header card; the comment is optional, in which case a
2-tuple may be used
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
bottom : bool, optional
If True, instead of appending after the last non-commentary card,
append after the last non-blank card.
end : bool, optional
If True, ignore the useblanks and bottom options, and append at the
very end of the Header.
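Examples
--------
A minimal illustrative sketch with a hypothetical card:
>>> hdr = Header()
>>> hdr.append(("TELESCOP", "HST", "telescope name"))
>>> hdr["TELESCOP"]
'HST'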
"""
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
"The value appended to a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if not end and card.is_blank:
# Blank cards should always just be appended to the end
end = True
if end:
self._cards.append(card)
idx = len(self._cards) - 1
else:
idx = len(self._cards) - 1
while idx >= 0 and self._cards[idx].is_blank:
idx -= 1
if not bottom and card.keyword not in Card._commentary_keywords:
while (
idx >= 0 and self._cards[idx].keyword in Card._commentary_keywords
):
idx -= 1
idx += 1
self._cards.insert(idx, card)
self._updateindices(idx)
keyword = Card.normalize_keyword(card.keyword)
self._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
self._rvkc_indices[card.rawkeyword].append(idx)
if not end:
# If the appended card was a commentary card, and it was appended
# before existing cards with the same keyword, the indices for
# cards with that keyword may have changed
if not bottom and card.keyword in Card._commentary_keywords:
self._keyword_indices[keyword].sort()
# Finally, if useblanks, delete blank cards from the end
if useblanks and self._countblanks():
# Don't do this unless there is at least one blank at the end
# of the header; we need to convert the card to its string
# image to see how long it is. In the vast majority of cases
# this will just be 80 (Card.length) but it may be longer for
# CONTINUE cards
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def extend(
self,
cards,
strip=True,
unique=False,
update=False,
update_first=False,
useblanks=True,
bottom=False,
end=False,
):
"""
Appends multiple keyword+value cards to the end of the header, similar
to `list.extend`.
Parameters
----------
cards : iterable
An iterable of (keyword, value, [comment]) tuples; see
`Header.append`.
strip : bool, optional
Remove any keywords that have meaning only to specific types of
HDUs, so that only more general keywords are added from extension
Header or Card list (default: `True`).
unique : bool, optional
If `True`, ensures that no duplicate keywords are appended;
keywords already in this header are simply discarded. The
exception is commentary keywords (COMMENT, HISTORY, etc.): they are
only treated as duplicates if their values match.
update : bool, optional
If `True`, update the current header with the values and comments
from duplicate keywords in the input header. This supersedes the
``unique`` argument. Commentary keywords are treated the same as
if ``unique=True``.
update_first : bool, optional
If the first keyword in the header is 'SIMPLE', and the first
keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is
replaced by the 'XTENSION' keyword. Likewise if the first keyword
in the header is 'XTENSION' and the first keyword in the input
header is 'SIMPLE', the 'XTENSION' keyword is replaced by the
'SIMPLE' keyword. This behavior is otherwise dumb as to whether or
not the resulting header is a valid primary or extension header.
This is mostly provided to support backwards compatibility with the
old ``Header.fromTxtFile`` method, and only applies if
``update=True``.
useblanks, bottom, end : bool, optional
These arguments are passed to :meth:`Header.append` while appending
new cards to the header.
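Examples
--------
An illustrative sketch of duplicate handling with ``unique=True``
(hypothetical cards):
>>> hdr = Header([("A", 1)])
>>> hdr.extend([("A", 2), ("B", 3)], unique=True)
>>> hdr["A"], hdr["B"]
(1, 3)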
"""
temp = self.__class__(cards)
if strip:
temp.strip()
if len(self):
first = self._cards[0].keyword
else:
first = None
# We don't immediately modify the header, because first we need to sift
# out any duplicates in the new header prior to adding them to the
# existing header, but while *allowing* duplicates from the header
# being extended from (see ticket #156)
extend_cards = []
for idx, card in enumerate(temp.cards):
keyword = card.keyword
if keyword not in Card._commentary_keywords:
if unique and not update and keyword in self:
continue
elif update:
if idx == 0 and update_first:
# Dumbly update the first keyword to either SIMPLE or
# XTENSION as the case may be, as was in the case in
# Header.fromTxtFile
if (keyword == "SIMPLE" and first == "XTENSION") or (
keyword == "XTENSION" and first == "SIMPLE"
):
del self[0]
self.insert(0, card)
else:
self[keyword] = (card.value, card.comment)
elif keyword in self:
self[keyword] = (card.value, card.comment)
else:
extend_cards.append(card)
else:
extend_cards.append(card)
else:
if (unique or update) and keyword in self:
if card.is_blank:
extend_cards.append(card)
continue
for value in self[keyword]:
if value == card.value:
break
else:
extend_cards.append(card)
else:
extend_cards.append(card)
for card in extend_cards:
self.append(card, useblanks=useblanks, bottom=bottom, end=end)
def count(self, keyword):
"""
Returns the count of the given keyword in the header, similar to
`list.count` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword to count instances of in the header
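Examples
--------
A minimal illustrative sketch:
>>> hdr = Header([("HISTORY", "first step"), ("HISTORY", "second step")])
>>> hdr.count("HISTORY")
2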
"""
keyword = Card.normalize_keyword(keyword)
# We have to look before we leap, since otherwise _keyword_indices,
# being a defaultdict, will create an entry for the nonexistent keyword
if keyword not in self._keyword_indices:
raise KeyError(f"Keyword {keyword!r} not found.")
return len(self._keyword_indices[keyword])
def index(self, keyword, start=None, stop=None):
"""
Returns the index of the first instance of the given keyword in the
header, similar to `list.index` if the Header object is treated as a
list of keywords.
Parameters
----------
keyword : str
The keyword to look up in the list of all keywords in the header
start : int, optional
The lower bound for the index
stop : int, optional
The upper bound for the index
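Examples
--------
A minimal illustrative sketch:
>>> hdr = Header([("A", 1), ("B", 2), ("C", 3)])
>>> hdr.index("B")
1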
"""
if start is None:
start = 0
if stop is None:
stop = len(self._cards)
if stop < start:
step = -1
else:
step = 1
norm_keyword = Card.normalize_keyword(keyword)
for idx in range(start, stop, step):
if self._cards[idx].keyword.upper() == norm_keyword:
return idx
else:
raise ValueError(f"The keyword {keyword!r} is not in the header.")
def insert(self, key, card, useblanks=True, after=False):
"""
Inserts a new keyword+value card into the Header at a given location,
similar to `list.insert`.
Parameters
----------
key : int, str, or tuple
The index into the list of header keywords before which the
new keyword should be inserted, or the name of a keyword before
which the new keyword should be inserted. Can also accept a
(keyword, index) tuple for inserting around duplicate keywords.
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple; see
`Header.append`
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
after : bool, optional
If set to `True`, insert *after* the specified index or keyword,
rather than before it. Defaults to `False`.
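Examples
--------
A minimal illustrative sketch inserting before an existing keyword:
>>> hdr = Header([("A", 1), ("C", 3)])
>>> hdr.insert("C", ("B", 2))
>>> list(hdr.keys())
['A', 'B', 'C']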
"""
if not isinstance(key, numbers.Integral):
# Don't pass through ints to _cardindex because it will not take
# kindly to indices outside the existing number of cards in the
# header, which insert needs to be able to support (for example
# when inserting into empty headers)
idx = self._cardindex(key)
else:
idx = key
if after:
if idx == -1:
idx = len(self._cards)
else:
idx += 1
if idx >= len(self._cards):
# This is just an append (Though it must be an append absolutely to
# the bottom, ignoring blanks, etc.--the point of the insert method
# is that you get exactly what you asked for with no surprises)
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
"The value inserted into a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
self._cards.insert(idx, card)
keyword = card.keyword
# If idx was < 0, determine the actual index according to the rules
# used by list.insert()
if idx < 0:
idx += len(self._cards) - 1
if idx < 0:
idx = 0
# All the keyword indices above the insertion point must be updated
self._updateindices(idx)
keyword = Card.normalize_keyword(keyword)
self._keyword_indices[keyword].append(idx)
count = len(self._keyword_indices[keyword])
if count > 1:
# There were already keywords with this same name
if keyword not in Card._commentary_keywords:
warnings.warn(
"A {!r} keyword already exists in this header. Inserting "
"duplicate keyword.".format(keyword),
AstropyUserWarning,
)
self._keyword_indices[keyword].sort()
if card.field_specifier is not None:
# Update the index of RVKC as well
rvkc_indices = self._rvkc_indices[card.rawkeyword]
rvkc_indices.append(idx)
rvkc_indices.sort()
if useblanks:
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def remove(self, keyword, ignore_missing=False, remove_all=False):
"""
Removes the first instance of the given keyword from the header similar
to `list.remove` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword of which to remove the first instance in the header.
ignore_missing : bool, optional
When True, ignores missing keywords. Otherwise, if the keyword
is not present in the header a KeyError is raised.
remove_all : bool, optional
When True, all instances of keyword will be removed.
Otherwise only the first instance of the given keyword is removed.
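Examples
--------
A minimal illustrative sketch with a duplicated keyword:
>>> hdr = Header([("A", 1), ("B", 2), ("A", 3)])
>>> hdr.remove("A")
>>> list(hdr.keys())
['B', 'A']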
"""
keyword = Card.normalize_keyword(keyword)
if keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
if remove_all:
while keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
elif not ignore_missing:
raise KeyError(f"Keyword '{keyword}' not found.")
def rename_keyword(self, oldkeyword, newkeyword, force=False):
"""
Rename a card's keyword in the header.
Parameters
----------
oldkeyword : str or int
Old keyword or card index
newkeyword : str
New keyword
force : bool, optional
When `True`, if the new keyword already exists in the header, force
the creation of a duplicate keyword. Otherwise a
`ValueError` is raised.
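Examples
--------
A minimal illustrative sketch:
>>> hdr = Header([("OLDKEY", 42, "some comment")])
>>> hdr.rename_keyword("OLDKEY", "NEWKEY")
>>> hdr["NEWKEY"]
42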
"""
oldkeyword = Card.normalize_keyword(oldkeyword)
newkeyword = Card.normalize_keyword(newkeyword)
if newkeyword == "CONTINUE":
raise ValueError("Can not rename to CONTINUE")
if (
newkeyword in Card._commentary_keywords
or oldkeyword in Card._commentary_keywords
):
if not (
newkeyword in Card._commentary_keywords
and oldkeyword in Card._commentary_keywords
):
raise ValueError(
"Regular and commentary keys can not be renamed to each other."
)
elif not force and newkeyword in self:
raise ValueError(f"Intended keyword {newkeyword} already exists in header.")
idx = self.index(oldkeyword)
card = self._cards[idx]
del self[idx]
self.insert(idx, (newkeyword, card.value, card.comment))
def add_history(self, value, before=None, after=None):
"""
Add a ``HISTORY`` card.
Parameters
----------
value : str
History text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("HISTORY", value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""
Add a ``COMMENT`` card.
Parameters
----------
value : str
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("COMMENT", value, before=before, after=after)
def add_blank(self, value="", before=None, after=None):
"""
Add a blank card.
Parameters
----------
value : str, optional
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("", value, before=before, after=after)
def strip(self):
"""
Strip cards specific to a certain kind of header.
Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of
the header can be used to reconstruct another kind of header.
"""
# TODO: Previously this only deleted some cards specific to an HDU if
# _hdutype matched that type. But it seemed simple enough to just
# delete all desired cards anyways, and just ignore the KeyErrors if
# they don't exist.
# However, it might be desirable to make this extendable somehow--have
# a way for HDU classes to specify some headers that are specific only
# to that type, and should be removed otherwise.
naxis = self.get("NAXIS", 0)
tfields = self.get("TFIELDS", 0)
for idx in range(naxis):
self.remove("NAXIS" + str(idx + 1), ignore_missing=True)
for name in (
"TFORM",
"TSCAL",
"TZERO",
"TNULL",
"TTYPE",
"TUNIT",
"TDISP",
"TDIM",
"THEAP",
"TBCOL",
):
for idx in range(tfields):
self.remove(name + str(idx + 1), ignore_missing=True)
for name in (
"SIMPLE",
"XTENSION",
"BITPIX",
"NAXIS",
"EXTEND",
"PCOUNT",
"GCOUNT",
"GROUPS",
"BSCALE",
"BZERO",
"TFIELDS",
):
self.remove(name, ignore_missing=True)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
def _update(self, card):
"""
The real update code. If keyword already exists, its value and/or
comment will be updated. Otherwise a new card will be appended.
This will not create a duplicate keyword except in the case of
commentary cards. The only other way to force creation of a duplicate
is to use the insert(), append(), or extend() methods.
"""
keyword, value, comment = card
# Lookups for existing/known keywords are case-insensitive
keyword = keyword.strip().upper()
if keyword.startswith("HIERARCH "):
keyword = keyword[9:]
if (
keyword not in Card._commentary_keywords
and keyword in self._keyword_indices
):
# Easy; just update the value/comment
idx = self._keyword_indices[keyword][0]
existing_card = self._cards[idx]
existing_card.value = value
if comment is not None:
# '' should be used to explicitly blank a comment
existing_card.comment = comment
if existing_card._modified:
self._modified = True
elif keyword in Card._commentary_keywords:
cards = self._splitcommentary(keyword, value)
if keyword in self._keyword_indices:
# Append after the last keyword of the same type
idx = self.index(keyword, start=len(self) - 1, stop=-1)
isblank = not (keyword or value or comment)
for c in reversed(cards):
self.insert(idx + 1, c, useblanks=(not isblank))
else:
for c in cards:
self.append(c, bottom=True)
else:
# A new keyword! self.append() will handle updating _modified
self.append(card)
def _cardindex(self, key):
"""Returns an index into the ._cards list given a valid lookup key."""
# This used to just set key = (key, 0) and then go on to act as if the
# user passed in a tuple, but it's much more common to just be given a
# string as the key, so optimize more for that case
if isinstance(key, str):
keyword = key
n = 0
elif isinstance(key, numbers.Integral):
# If < 0, determine the actual index
if key < 0:
key += len(self._cards)
if key < 0 or key >= len(self._cards):
raise IndexError("Header index out of range.")
return key
elif isinstance(key, slice):
return key
elif isinstance(key, tuple):
if (
len(key) != 2
or not isinstance(key[0], str)
or not isinstance(key[1], numbers.Integral)
):
raise ValueError(
"Tuple indices must be 2-tuples consisting of a "
"keyword string and an integer index."
)
keyword, n = key
else:
raise ValueError(
"Header indices must be either a string, a 2-tuple, or an integer."
)
keyword = Card.normalize_keyword(keyword)
# Returns the index into _cards for the n-th card with the given
# keyword (where n is 0-based)
indices = self._keyword_indices.get(keyword, None)
if keyword and not indices:
if len(keyword) > KEYWORD_LENGTH or "." in keyword:
raise KeyError(f"Keyword {keyword!r} not found.")
else:
# Maybe it's a RVKC?
indices = self._rvkc_indices.get(keyword, None)
if not indices:
raise KeyError(f"Keyword {keyword!r} not found.")
try:
return indices[n]
except IndexError:
raise IndexError(
"There are only {} {!r} cards in the header.".format(
len(indices), keyword
)
)
def _keyword_from_index(self, idx):
"""
Given an integer index, return the (keyword, repeat) tuple that index
refers to. For most keywords the repeat will always be zero, but it
may be greater than zero for keywords that are duplicated (especially
commentary keywords).
In a sense this is the inverse of self.index, except that it also
supports duplicates.
"""
if idx < 0:
idx += len(self._cards)
keyword = self._cards[idx].keyword
keyword = Card.normalize_keyword(keyword)
repeat = self._keyword_indices[keyword].index(idx)
return keyword, repeat
def _relativeinsert(self, card, before=None, after=None, replace=False):
"""
Inserts a new card before or after an existing card; used to
implement support for the legacy before/after keyword arguments to
Header.update().
If replace=True, move an existing card with the same keyword.
"""
if before is None:
insertionkey = after
else:
insertionkey = before
def get_insertion_idx():
if not (
isinstance(insertionkey, numbers.Integral)
and insertionkey >= len(self._cards)
):
idx = self._cardindex(insertionkey)
else:
idx = insertionkey
if before is None:
idx += 1
return idx
if replace:
# The card presumably already exists somewhere in the header.
# Check whether or not we actually have to move it; if it does need
# to be moved we just delete it and then it will be reinserted
# below
old_idx = self._cardindex(card.keyword)
insertion_idx = get_insertion_idx()
if insertion_idx >= len(self._cards) and old_idx == len(self._cards) - 1:
# The card would be appended to the end, but it's already at
# the end
return
if before is not None:
if old_idx == insertion_idx - 1:
return
elif after is not None and old_idx == insertion_idx:
return
del self[old_idx]
# Even if replace=True, the insertion idx may have changed since the
# old card was deleted
idx = get_insertion_idx()
if card[0] in Card._commentary_keywords:
cards = reversed(self._splitcommentary(card[0], card[1]))
else:
cards = [card]
for c in cards:
self.insert(idx, c)
def _updateindices(self, idx, increment=True):
"""
For all cards with index above idx, increment or decrement its index
value in the keyword_indices dict.
"""
if idx > len(self._cards):
# Save us some effort
return
increment = 1 if increment else -1
for index_sets in (self._keyword_indices, self._rvkc_indices):
for indices in index_sets.values():
for jdx, keyword_index in enumerate(indices):
if keyword_index >= idx:
indices[jdx] += increment
def _countblanks(self):
"""Returns the number of blank cards at the end of the Header."""
for idx in range(1, len(self._cards)):
if not self._cards[-idx].is_blank:
return idx - 1
return 0
def _useblanks(self, count):
for _ in range(count):
if self._cards[-1].is_blank:
del self[-1]
else:
break
def _haswildcard(self, keyword):
"""Return `True` if the input keyword contains a wildcard pattern."""
return isinstance(keyword, str) and (
keyword.endswith("...") or "*" in keyword or "?" in keyword
)
def _wildcardmatch(self, pattern):
"""
Returns a list of indices of the cards matching the given wildcard
pattern.
* '*' matches 0 or more characters
* '?' matches a single character
* '...' matches 0 or more of any non-whitespace character
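For example (an illustrative sketch), ``'NAXIS?'`` compiles to the
regular expression ``NAXIS.$``, matching ``NAXIS1`` and ``NAXIS2`` but
not ``NAXIS`` itself:
>>> hdr = Header([("NAXIS", 2), ("NAXIS1", 100), ("NAXIS2", 100)])
>>> hdr._wildcardmatch("NAXIS?")
[1, 2]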
"""
pattern = pattern.replace("*", r".*").replace("?", r".")
pattern = pattern.replace("...", r"\S*") + "$"
pattern_re = re.compile(pattern, re.I)
return [
idx
for idx, card in enumerate(self._cards)
if pattern_re.match(card.keyword)
]
def _set_slice(self, key, value, target):
"""
Used to implement Header.__setitem__ and CardAccessor.__setitem__.
"""
if isinstance(key, slice) or self._haswildcard(key):
if isinstance(key, slice):
indices = range(*key.indices(len(target)))
else:
indices = self._wildcardmatch(key)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
target[idx] = val
return True
return False
def _splitcommentary(self, keyword, value):
"""
Given a commentary keyword and value, returns a list of the one or more
cards needed to represent the full value. This is primarily used to
create the multiple commentary cards needed to represent a long value
that won't fit into a single commentary card.
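For example (an illustrative sketch), each card holds at most
``Card.length - KEYWORD_LENGTH`` (80 - 8 = 72) characters of value, so a
100-character COMMENT is split into two cards:
>>> len(Header()._splitcommentary("COMMENT", "x" * 100))
2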
"""
# The maximum value in each card can be the maximum card length minus
# the maximum key length (which can include spaces if the keyword
# length is less than 8)
maxlen = Card.length - KEYWORD_LENGTH
valuestr = str(value)
if len(valuestr) <= maxlen:
# The value can fit in a single card
cards = [Card(keyword, value)]
else:
# The value must be split across multiple consecutive commentary
# cards
idx = 0
cards = []
while idx < len(valuestr):
cards.append(Card(keyword, valuestr[idx : idx + maxlen]))
idx += maxlen
return cards
def _add_commentary(self, key, value, before=None, after=None):
"""
Add a commentary card.
If ``before`` and ``after`` are `None`, add to the last occurrence
of cards of the same name (except blank card). If there is no
card (or blank card), append at the end.
"""
if before is not None or after is not None:
self._relativeinsert((key, value), before=before, after=after)
else:
self[key] = value
collections.abc.MutableSequence.register(Header)
collections.abc.MutableMapping.register(Header)
class _DelayedHeader:
"""
Descriptor used to create the Header object from the header string that
was stored in HDU._header_str when parsing the file.
"""
def __get__(self, obj, owner=None):
try:
return obj.__dict__["_header"]
except KeyError:
if obj._header_str is not None:
hdr = Header.fromstring(obj._header_str)
obj._header_str = None
else:
raise AttributeError(
"'{}' object has no attribute '_header'".format(
obj.__class__.__name__
)
)
obj.__dict__["_header"] = hdr
return hdr
def __set__(self, obj, val):
obj.__dict__["_header"] = val
def __delete__(self, obj):
del obj.__dict__["_header"]
class _BasicHeaderCards:
"""
This class provides access to cards via the _BasicHeader.cards attribute.
This is needed because during HDU class detection some HDUs use the
.cards interface. Cards cannot be modified here, as the _BasicHeader
object is deleted once the HDU object is created.
"""
def __init__(self, header):
self.header = header
def __getitem__(self, key):
# .cards is a list of cards, so key here is an integer.
# get the keyword name from its index.
key = self.header._keys[key]
# then we get the card from the _BasicHeader._cards list, or parse it
# if needed.
try:
return self.header._cards[key]
except KeyError:
cardstr = self.header._raw_cards[key]
card = Card.fromstring(cardstr)
self.header._cards[key] = card
return card
class _BasicHeader(collections.abc.Mapping):
"""This class provides fast header parsing, without all the additional
features of the Header class. Here only standard keywords are parsed, no
support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc.
The raw card images are stored and parsed only if needed. The idea is that
to create the HDU objects, only a small subset of standard cards is needed.
Once a card is parsed, which is deferred to the Card class, the Card object
is kept in a cache. This is useful because a small subset of cards is used
a lot in the HDU creation process (NAXIS, XTENSION, ...).
"""
def __init__(self, cards):
# dict of (keywords, card images)
self._raw_cards = cards
self._keys = list(cards.keys())
# dict of (keyword, Card object) storing the parsed cards
self._cards = {}
# the _BasicHeaderCards object provides access to Card objects via
# their integer indices
self.cards = _BasicHeaderCards(self)
self._modified = False
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
key = self._keys[key]
try:
return self._cards[key].value
except KeyError:
# parse the Card and store it
cardstr = self._raw_cards[key]
self._cards[key] = card = Card.fromstring(cardstr)
return card.value
def __len__(self):
return len(self._raw_cards)
def __iter__(self):
return iter(self._raw_cards)
def index(self, keyword):
return self._keys.index(keyword)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
@classmethod
def fromfile(cls, fileobj):
"""The main method to parse a FITS header from a file. The parsing is
done with the parse_header function implemented in Cython."""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "rb")
close_file = True
try:
header_str, cards = parse_header(fileobj)
_check_padding(header_str, BLOCK_SIZE, False)
return header_str, cls(cards)
finally:
if close_file:
fileobj.close()
class _CardAccessor:
"""
This is a generic class for wrapping a Header in such a way that you can
use the header's slice/filtering capabilities to return a subset of cards
and do something with them.
This is sort of the opposite notion of the old CardList class--whereas
Header used to use CardList to get lists of cards, this uses Header to get
lists of cards.
"""
# TODO: Consider giving this dict/list methods like Header itself
def __init__(self, header):
self._header = header
def __repr__(self):
return "\n".join(repr(c) for c in self._header._cards)
def __len__(self):
return len(self._header._cards)
def __iter__(self):
return iter(self._header._cards)
def __eq__(self, other):
# If the `other` item is a scalar we will still treat it as equal if
# this _CardAccessor only contains one item
if not isiterable(other) or isinstance(other, str):
if len(self) == 1:
other = [other]
else:
return False
for a, b in itertools.zip_longest(self, other):
if a != b:
return False
else:
return True
def __ne__(self, other):
return not (self == other)
def __getitem__(self, item):
if isinstance(item, slice) or self._header._haswildcard(item):
return self.__class__(self._header[item])
idx = self._header._cardindex(item)
return self._header._cards[idx]
def _setslice(self, item, value):
"""
Helper for implementing __setitem__ on _CardAccessor subclasses; slices
should always be handled in this same way.
"""
if isinstance(item, slice) or self._header._haswildcard(item):
if isinstance(item, slice):
indices = range(*item.indices(len(self)))
else:
indices = self._header._wildcardmatch(item)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
self[idx] = val
return True
return False
class _HeaderComments(_CardAccessor):
"""
A class used internally by the Header class for the Header.comments
attribute access.
This object can be used to display all the keyword comments in the Header,
or look up the comments on specific keywords. It allows all the same forms
of keyword lookup as the Header class itself, but returns comments instead
of values.
"""
def __iter__(self):
for card in self._header._cards:
yield card.comment
def __repr__(self):
"""Returns a simple list of all keywords and their comments."""
keyword_length = KEYWORD_LENGTH
for card in self._header._cards:
keyword_length = max(keyword_length, len(card.keyword))
return "\n".join(
"{:>{len}} {}".format(c.keyword, c.comment, len=keyword_length)
for c in self._header._cards
)
def __getitem__(self, item):
"""
Slices and filter strings return a new _HeaderComments containing the
returned cards. Otherwise the comment of a single card is returned.
"""
item = super().__getitem__(item)
if isinstance(item, _HeaderComments):
# The item key was a slice
return item
return item.comment
def __setitem__(self, item, comment):
"""
Set/update the comment on specified card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, comment, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
idx = self._header._cardindex(item)
value = self._header[idx]
self._header[idx] = (value, comment)
class _HeaderCommentaryCards(_CardAccessor):
"""
This is used to return a list-like sequence over all the values in the
header for a given commentary keyword, such as HISTORY.
"""
def __init__(self, header, keyword=""):
super().__init__(header)
self._keyword = keyword
self._count = self._header.count(self._keyword)
self._indices = slice(self._count).indices(self._count)
# __len__ and __iter__ need to be overridden from the base class due to the
# different approach this class has to take for slicing
def __len__(self):
return len(range(*self._indices))
def __iter__(self):
for idx in range(*self._indices):
yield self._header[(self._keyword, idx)]
def __repr__(self):
return "\n".join(str(x) for x in self)
def __getitem__(self, idx):
if isinstance(idx, slice):
n = self.__class__(self._header, self._keyword)
n._indices = idx.indices(self._count)
return n
elif not isinstance(idx, numbers.Integral):
raise ValueError(f"{self._keyword} index must be an integer")
idx = list(range(*self._indices))[idx]
return self._header[(self._keyword, idx)]
def __setitem__(self, item, value):
"""
Set the value of a specified commentary card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, value, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
self._header[(self._keyword, item)] = value
def _block_size(sep):
"""
Determine the size of a FITS header block if a non-blank separator is used
between cards.
"""
return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1))
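# For example, with sep="\n" each block gains one separator per card
# boundary: 2880 + 1 * (2880 // 80 - 1) == 2915 bytes (an illustrative
# figure, assuming BLOCK_SIZE == 2880 and Card.length == 80).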
def _pad_length(stringlen):
"""Bytes needed to pad the input stringlen to the next FITS block."""
return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE
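# For example, _pad_length(2880) == 0 while _pad_length(2881) == 2879,
# since FITS files are written in 2880-byte blocks.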
def _check_padding(header_str, block_size, is_eof, check_block_size=True):
# Strip any zero-padding (see ticket #106)
if header_str and header_str[-1] == "\0":
if is_eof and header_str.strip("\0") == "":
# TODO: Pass this warning to validation framework
warnings.warn(
"Unexpected extra padding at the end of the file. This "
"padding may not be preserved when saving changes.",
AstropyUserWarning,
)
raise EOFError()
else:
# Replace the illegal null bytes with spaces as required by
# the FITS standard, and issue a nasty warning
# TODO: Pass this warning to validation framework
warnings.warn(
"Header block contains null bytes instead of spaces for "
"padding, and is not FITS-compliant. Nulls may be "
"replaced with spaces upon writing.",
AstropyUserWarning,
)
header_str = header_str.replace("\0", " ")
if check_block_size and (len(header_str) % block_size) != 0:
# This error message ignores the length of the separator for
# now, but maybe it shouldn't?
actual_len = len(header_str) - block_size + BLOCK_SIZE
# TODO: Pass this error to validation framework
raise ValueError(f"Header size is not multiple of {BLOCK_SIZE}: {actual_len}")
def _hdr_data_size(header):
"""Calculate the data size (in bytes) following the given `Header`"""
size = 0
naxis = header.get("NAXIS", 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * header["NAXIS" + str(idx + 1)]
bitpix = header["BITPIX"]
gcount = header.get("GCOUNT", 1)
pcount = header.get("PCOUNT", 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
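# For example, a header with BITPIX=16, NAXIS=2, and NAXIS1 = NAXIS2 = 100
# (no GCOUNT/PCOUNT cards) yields abs(16) * 1 * (0 + 100 * 100) // 8
# == 20000 bytes of data.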
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import warnings
import weakref
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import lazyproperty
from .column import (
_VLF,
ASCII2NUMPY,
ASCII2STR,
ASCIITNULL,
FITS2NUMPY,
ColDefs,
Delayed,
_AsciiColDefs,
_FormatP,
_FormatX,
_get_index,
_makep,
_unwrapx,
_wrapx,
)
from .util import _rstrip_inplace, decode_ascii, encode_ascii
class FITS_record:
"""
FITS record class.
`FITS_record` is used to access records of the `FITS_rec` object.
This will allow us to deal with scaled columns. It also handles
conversion/scaling of columns in ASCII tables. The `FITS_record`
class expects a `FITS_rec` object as input.
"""
def __init__(
self, input, row=0, start=None, end=None, step=None, base=None, **kwargs
):
"""
Parameters
----------
input : array
The array to wrap.
row : int, optional
The starting logical row of the array.
start : int, optional
The starting column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
end : int, optional
The ending column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
"""
self.array = input
self.row = row
if base:
width = len(base)
else:
width = self.array._nfields
s = slice(start, end, step).indices(width)
self.start, self.end, self.step = s
self.base = base
def __getitem__(self, key):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
return type(self)(self.array, self.row, key.start, key.stop, key.step, self)
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
return self.array.field(indx)[self.row]
def __setitem__(self, key, value):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
for indx in range(*key.indices(len(self))):
indx = self._get_index(indx)
self.array.field(indx)[self.row] = value
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
self.array.field(indx)[self.row] = value
def __len__(self):
return len(range(self.start, self.end, self.step))
def __repr__(self):
"""
Display a single row.
"""
outlist = []
for idx in range(len(self)):
outlist.append(repr(self[idx]))
return f"({', '.join(outlist)})"
def field(self, field):
"""
Get the field data of the record.
"""
return self.__getitem__(field)
def setfield(self, field, value):
"""
Set the field data of the record.
"""
self.__setitem__(field, value)
@lazyproperty
def _bases(self):
bases = [weakref.proxy(self)]
base = self.base
while base:
bases.append(base)
base = base.base
return bases
def _get_index(self, indx):
indices = np.ogrid[: self.array._nfields]
for base in reversed(self._bases):
if base.step < 1:
s = slice(base.start, None, base.step)
else:
s = slice(base.start, base.end, base.step)
indices = indices[s]
return indices[indx]
class FITS_rec(np.recarray):
"""
FITS record array class.
`FITS_rec` is the data part of a table HDU. This is a layer
over the `~numpy.recarray`, so we can deal with scaled columns.
It inherits all of the standard methods from `numpy.ndarray`.
"""
_record_type = FITS_record
_character_as_bytes = False
def __new__(subtype, input):
"""
Construct a FITS record array from a recarray.
"""
# input should be a record array
if input.dtype.subdtype is None:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data
)
else:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data, strides=input.strides
)
self._init()
if self.dtype.fields:
self._nfields = len(self.dtype.fields)
return self
def __setstate__(self, state):
meta = state[-1]
column_state = state[-2]
state = state[:-2]
super().__setstate__(state)
self._col_weakrefs = weakref.WeakSet()
for attr, value in zip(meta, column_state):
setattr(self, attr, value)
def __reduce__(self):
"""
Return a 3-tuple for pickling a FITS_rec. Use the super-class
functionality but then add in a tuple of FITS_rec-specific
values that get used in __setstate__.
"""
reconst_func, reconst_func_args, state = super().__reduce__()
# Define FITS_rec-specific attrs that get added to state
column_state = []
meta = []
for attrs in [
"_converted",
"_heapoffset",
"_heapsize",
"_nfields",
"_gap",
"_uint",
"parnames",
"_coldefs",
]:
with suppress(AttributeError):
# _coldefs can be Delayed, and file objects cannot be
# pickled, so it needs to be deepcopied first
if attrs == "_coldefs":
column_state.append(self._coldefs.__deepcopy__(None))
else:
column_state.append(getattr(self, attrs))
meta.append(attrs)
state = state + (column_state, meta)
return reconst_func, reconst_func_args, state
def __array_finalize__(self, obj):
if obj is None:
return
if isinstance(obj, FITS_rec):
self._character_as_bytes = obj._character_as_bytes
if isinstance(obj, FITS_rec) and obj.dtype == self.dtype:
self._converted = obj._converted
self._heapoffset = obj._heapoffset
self._heapsize = obj._heapsize
self._col_weakrefs = obj._col_weakrefs
self._coldefs = obj._coldefs
self._nfields = obj._nfields
self._gap = obj._gap
self._uint = obj._uint
elif self.dtype.fields is not None:
# This will allow regular ndarrays with fields, rather than
# just other FITS_rec objects
self._nfields = len(self.dtype.fields)
self._converted = {}
self._heapoffset = getattr(obj, "_heapoffset", 0)
self._heapsize = getattr(obj, "_heapsize", 0)
self._gap = getattr(obj, "_gap", 0)
self._uint = getattr(obj, "_uint", False)
self._col_weakrefs = weakref.WeakSet()
self._coldefs = ColDefs(self)
# Work around chicken-egg problem. Column.array relies on the
# _coldefs attribute to set up ref back to parent FITS_rec; however
# in the above line the self._coldefs has not been assigned yet so
# this fails. This patches that up...
for col in self._coldefs:
del col.array
col._parent_fits_rec = weakref.ref(self)
else:
self._init()
def _init(self):
"""Initializes internal attributes specific to FITS-isms."""
self._nfields = 0
self._converted = {}
self._heapoffset = 0
self._heapsize = 0
self._col_weakrefs = weakref.WeakSet()
self._coldefs = None
self._gap = 0
self._uint = False
@classmethod
def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False):
"""
Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec`
object.
.. note::
This was originally part of the ``new_table`` function in the table
module but was moved into a class method since most of its
functionality always had more to do with initializing a `FITS_rec`
object than anything else, and much of it also overlapped with
``FITS_rec._scale_back``.
Parameters
----------
columns : sequence of `Column` or a `ColDefs`
The columns from which to create the table data. If these
columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns
will be used as a template for a new table with the requested
number of rows.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, fill all cells with zeros or blanks. If
`False`, copy the data from the input; undefined cells will still
be filled with zeros/blanks.
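Examples
--------
A minimal illustrative sketch building a one-column table:
>>> import numpy as np
>>> from astropy.io.fits import Column
>>> col = Column(name="x", format="J", array=np.array([1, 2, 3]))
>>> rec = FITS_rec.from_columns([col])
>>> len(rec), rec.names
(3, ['x'])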
"""
if not isinstance(columns, ColDefs):
columns = ColDefs(columns)
# read the delayed data
for column in columns:
arr = column.array
if isinstance(arr, Delayed):
if arr.hdu.data is None:
column.array = None
else:
column.array = _get_recarray_field(arr.hdu.data, arr.field)
# Reset columns._arrays (which we may want to just do away with
# altogether)
del columns._arrays
# use the largest column shape as the shape of the record
if nrows == 0:
for arr in columns._arrays:
if arr is not None:
dim = arr.shape[0]
else:
dim = 0
if dim > nrows:
nrows = dim
raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8)
raw_data.fill(ord(columns._padding_byte))
data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)
data._character_as_bytes = character_as_bytes
# Previously this assignment was made from hdu.columns, but that's a
# bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
# the _TableBaseHDU.columns property is actually returned from
# .data._coldefs, so this assignment was circular! Don't make that
# mistake again.
# All of this is an artifact of the fragility of the FITS_rec class,
# and that it can't just be initialized by columns...
data._coldefs = columns
# If fill is True we don't copy anything from the column arrays. We're
# just using them as a template, and returning a table filled with
# zeros/blanks
if fill:
return data
# Otherwise we have to fill the recarray with data from the input
# columns
for idx, column in enumerate(columns):
# For each column in the ColDef object, determine the number of
# rows in that column. This will be either the number of rows in
# the ndarray associated with the column, or the number of rows
given in the call to this function, whichever is smaller. If
# the input FILL argument is true, the number of rows is set to
# zero so that no data is copied from the original input data.
arr = column.array
if arr is None:
array_size = 0
else:
array_size = len(arr)
n = min(array_size, nrows)
# TODO: At least *some* of this logic is mostly redundant with the
# _convert_foo methods in this class; see if we can eliminate some
# of that duplication.
if not n:
# The input column had an empty array, so just use the fill
# value
continue
field = _get_recarray_field(data, idx)
name = column.name
fitsformat = column.format
recformat = fitsformat.recformat
outarr = field[:n]
inarr = arr[:n]
if isinstance(recformat, _FormatX):
# Data is a bit array
if inarr.shape[-1] == recformat.repeat:
_wrapx(inarr, outarr, recformat.repeat)
continue
elif isinstance(recformat, _FormatP):
data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows))
continue
# TODO: Find a better way of determining that the column is meant
# to be FITS L formatted
elif recformat[-2:] == FITS2NUMPY["L"] and inarr.dtype == bool:
# column is boolean
# The raw data field should be filled with either 'T' or 'F'
# (not 0). Use 'F' as a default
field[:] = ord("F")
# Also save the original boolean array in data._converted so
# that it doesn't have to be re-converted
converted = np.zeros(field.shape, dtype=bool)
converted[:n] = inarr
data._cache_field(name, converted)
# TODO: Maybe this step isn't necessary at all if _scale_back
# will handle it?
inarr = np.where(inarr == np.False_, ord("F"), ord("T"))
elif columns[idx]._physical_values and columns[idx]._pseudo_unsigned_ints:
# Temporary hack...
bzero = column.bzero
converted = np.zeros(field.shape, dtype=inarr.dtype)
converted[:n] = inarr
data._cache_field(name, converted)
if n < nrows:
# Pre-scale rows below the input data
field[n:] = -bzero
inarr = inarr - bzero
elif isinstance(columns, _AsciiColDefs):
# Regardless of whether the format is character or numeric, if the
# input array contains characters then it's already in the raw
# format for ASCII tables
if fitsformat._pseudo_logical:
# Hack to support converting from 8-bit T/F characters
# Normally the column array is a chararray of 1 character
# strings, but we need to view it as a normal ndarray of
# 8-bit ints to fill it with ASCII codes for 'T' and 'F'
outarr = field.view(np.uint8, np.ndarray)[:n]
elif arr.dtype.kind not in ("S", "U"):
# Set up views of numeric columns with the appropriate
# numeric dtype
# Fill with the appropriate blanks for the column format
data._cache_field(name, np.zeros(nrows, dtype=arr.dtype))
outarr = data._converted[name][:n]
outarr[:] = inarr
continue
if inarr.shape != outarr.shape:
if (
inarr.dtype.kind == outarr.dtype.kind
and inarr.dtype.kind in ("U", "S")
and inarr.dtype != outarr.dtype
):
inarr_rowsize = inarr[0].size
inarr = inarr.flatten().view(outarr.dtype)
# This is a special case to handle input arrays with
# non-trivial TDIMn.
# By design each row of the outarray is 1-D, while each row of
# the input array may be n-D
if outarr.ndim > 1:
# The normal case where the first dimension is the rows
inarr_rowsize = inarr[0].size
inarr = inarr.reshape(n, inarr_rowsize)
outarr[:, :inarr_rowsize] = inarr
else:
# Special case for strings where the out array only has one
# dimension (the second dimension is rolled up into the
# strings)
outarr[:n] = inarr.ravel()
else:
outarr[:] = inarr
# Now replace the original column array references with the new
# fields
# This is required to prevent the issue reported in
# https://github.com/spacetelescope/PyFITS/issues/99
for idx in range(len(columns)):
columns._arrays[idx] = data.field(idx)
return data
def __repr__(self):
# Force use of the normal ndarray repr (rather than the new
# one added for recarray in Numpy 1.10) for backwards compat
return np.ndarray.__repr__(self)
def __getattribute__(self, attr):
# First, see if ndarray has this attr, and return it if so. Note that
# this means a field with the same name as an ndarray attr cannot be
# accessed by attribute, this is Numpy's default behavior.
# We avoid using np.recarray.__getattribute__ here because after doing
# this check it would access the columns without doing the conversions
# that we need (with .field, see below).
try:
return object.__getattribute__(self, attr)
except AttributeError:
pass
# attr might still be a fieldname. If we have column definitions,
# we should access this via .field, as the data may have to be scaled.
if self._coldefs is not None and attr in self.columns.names:
return self.field(attr)
# If not, just let the usual np.recarray override deal with it.
return super().__getattribute__(attr)
def __getitem__(self, key):
if self._coldefs is None:
return super().__getitem__(key)
if isinstance(key, str):
return self.field(key)
# Have to view as a recarray then back as a FITS_rec, otherwise the
# circular reference fix/hack in FITS_rec.field() won't preserve
# the slice.
out = self.view(np.recarray)[key]
if type(out) is not np.recarray:
# Oops, we got a single element rather than a view. In that case,
# return a Record, which has no __getstate__ and is more efficient.
return self._record_type(self, key)
# We got a view; change it back to our class, and add stuff
out = out.view(type(self))
out._uint = self._uint
out._coldefs = ColDefs(self._coldefs)
arrays = []
out._converted = {}
for idx, name in enumerate(self._coldefs.names):
#
# Store the new arrays for the _coldefs object
#
arrays.append(self._coldefs._arrays[idx][key])
# Ensure that the sliced FITS_rec will view the same scaled
# columns as the original; this is one of the few cases where
# it is not necessary to use _cache_field()
if name in self._converted:
dummy = self._converted[name]
field = np.ndarray.__getitem__(dummy, key)
out._converted[name] = field
out._coldefs._arrays = arrays
return out
def __setitem__(self, key, value):
if self._coldefs is None:
return super().__setitem__(key, value)
if isinstance(key, str):
self[key][:] = value
return
if isinstance(key, slice):
end = min(len(self), key.stop or len(self))
end = max(0, end)
start = max(0, key.start or 0)
end = min(end, start + len(value))
for idx in range(start, end):
self.__setitem__(idx, value[idx - start])
return
if isinstance(value, FITS_record):
for idx in range(self._nfields):
self.field(self.names[idx])[key] = value.field(self.names[idx])
elif isinstance(value, (tuple, list, np.void)):
if self._nfields == len(value):
for idx in range(self._nfields):
self.field(idx)[key] = value[idx]
else:
raise ValueError(
"Input tuple or list required to have {} elements.".format(
self._nfields
)
)
else:
raise TypeError(
"Assignment requires a FITS_record, tuple, or list as input."
)
def _ipython_key_completions_(self):
return self.names
def copy(self, order="C"):
"""
The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to
`numpy.copy`. Differences include that it re-views the copied array as
self's ndarray subclass, as though it were taking a slice; this means
``__array_finalize__`` is called and the copy shares all the array
attributes (including ``._converted``!). So we need to make a deep
copy of all those attributes so that the two arrays truly do not share
any data.
"""
new = super().copy(order=order)
new.__dict__ = copy.deepcopy(self.__dict__)
return new
@property
def columns(self):
"""A user-visible accessor for the coldefs."""
return self._coldefs
@property
def _coldefs(self):
# This used to be a normal internal attribute, but it was changed to a
# property as a quick and transparent way to work around the reference
# leak bug fixed in https://github.com/astropy/astropy/pull/4539
#
# See the long comment in the Column.array property for more details
# on this. But in short, FITS_rec now has a ._col_weakrefs attribute
# which is a WeakSet of weakrefs to each Column in _coldefs.
#
# So whenever ._coldefs is set we also add each Column in the ColDefs
# to the weakrefs set. This is an easy way to find out if a Column has
# any references to it external to the FITS_rec (i.e. a user assigned a
# column to a variable). If the column is still in _col_weakrefs then
# there are other references to it external to this FITS_rec. We use
# that information in __del__ to save off copies of the array data
# for those columns to their Column.array property before our memory
# is freed.
return self.__dict__.get("_coldefs")
@_coldefs.setter
def _coldefs(self, cols):
self.__dict__["_coldefs"] = cols
if isinstance(cols, ColDefs):
for col in cols.columns:
self._col_weakrefs.add(col)
@_coldefs.deleter
def _coldefs(self):
try:
del self.__dict__["_coldefs"]
except KeyError as exc:
raise AttributeError(exc.args[0])
def __del__(self):
try:
del self._coldefs
if self.dtype.fields is not None:
for col in self._col_weakrefs:
if col.array is not None:
col.array = col.array.copy()
# See issues #4690 and #4912
except (AttributeError, TypeError): # pragma: no cover
pass
@property
def names(self):
"""List of column names."""
if self.dtype.fields:
return list(self.dtype.names)
elif getattr(self, "_coldefs", None) is not None:
return self._coldefs.names
else:
return None
@property
def formats(self):
"""List of column FITS formats."""
if getattr(self, "_coldefs", None) is not None:
return self._coldefs.formats
return None
@property
def _raw_itemsize(self):
"""
Returns the size of row items that would be written to the raw FITS
file, taking into account the possibility of unicode columns being
compactified.
Currently for internal use only.
"""
if _has_unicode_fields(self):
total_itemsize = 0
for field in self.dtype.fields.values():
itemsize = field[0].itemsize
if field[0].kind == "U":
itemsize = itemsize // 4
total_itemsize += itemsize
return total_itemsize
else:
# Just return the normal itemsize
return self.itemsize
def field(self, key):
"""
A view of a `Column`'s data as an array.
"""
# NOTE: The *column* index may not be the same as the field index in
# the recarray, if the column is a phantom column
column = self.columns[key]
name = column.name
format = column.format
if format.dtype.itemsize == 0:
warnings.warn(
"Field {!r} has a repeat count of 0 in its format code, "
"indicating an empty field.".format(key)
)
return np.array([], dtype=format.dtype)
# If field's base is a FITS_rec, we can run into trouble because it
# contains a reference to the ._coldefs object of the original data;
# this can lead to a circular reference; see ticket #49
base = self
while isinstance(base, FITS_rec) and isinstance(base.base, np.recarray):
base = base.base
# base could still be a FITS_rec in some cases, so take care to
# use rec.recarray.field to avoid a potential infinite
# recursion
field = _get_recarray_field(base, name)
if name not in self._converted:
recformat = format.recformat
# TODO: If we're now passing the column to these subroutines, do we
# really need to pass them the recformat?
if isinstance(recformat, _FormatP):
# for P format
converted = self._convert_p(column, field, recformat)
else:
# Handle all other column data types which are fixed-width
# fields
converted = self._convert_other(column, field, recformat)
# Note: Never assign values directly into the self._converted dict;
# always go through self._cache_field; this way self._converted is
# only used to store arrays that are not already direct views of
# our own data.
self._cache_field(name, converted)
return converted
return self._converted[name]
def _cache_field(self, name, field):
"""
Do not store fields in _converted if one of its bases is self,
or if it has a common base with self.
This results in a reference cycle that cannot be broken since
ndarrays do not participate in cyclic garbage collection.
"""
base = field
while True:
self_base = self
while True:
if self_base is base:
return
if getattr(self_base, "base", None) is not None:
self_base = self_base.base
else:
break
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self._converted[name] = field
def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value):
"""
Update how the data is formatted depending on changes to column
attributes initiated by the user through the `Column` interface.
Dispatches column attribute change notifications to individual methods
for each attribute ``_update_column_<attr>``
"""
method_name = f"_update_column_{attr}"
if hasattr(self, method_name):
# Right now this is so we can be lazy and not implement updaters
# for every attribute yet--some we may not need at all, TBD
getattr(self, method_name)(column, idx, old_value, new_value)
def _update_column_name(self, column, idx, old_name, name):
"""Update the dtype field names when a column name is changed."""
dtype = self.dtype
# Updating the names on the dtype should suffice
dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]
def _convert_x(self, field, recformat):
"""Convert a raw table column to a bit array as specified by the
FITS X format.
"""
dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_)
_unwrapx(field, dummy, recformat.repeat)
return dummy
def _convert_p(self, column, field, recformat):
"""Convert a raw table column of FITS P or Q format descriptors
to a VLA column with the array data returned from the heap.
"""
dummy = _VLF([None] * len(self), dtype=recformat.dtype)
raw_data = self._get_raw_data()
if raw_data is None:
raise OSError(
"Could not find heap data for the {!r} variable-length "
"array column.".format(column.name)
)
for idx in range(len(self)):
offset = field[idx, 1] + self._heapoffset
count = field[idx, 0]
if recformat.dtype == "a":
dt = np.dtype(recformat.dtype + str(1))
arr_len = count * dt.itemsize
da = raw_data[offset : offset + arr_len].view(dt)
da = np.char.array(da.view(dtype=dt), itemsize=count)
dummy[idx] = decode_ascii(da)
else:
dt = np.dtype(recformat.dtype)
arr_len = count * dt.itemsize
dummy[idx] = raw_data[offset : offset + arr_len].view(dt)
dummy[idx].dtype = dummy[idx].dtype.newbyteorder(">")
# Each array in the field may now require additional
# scaling depending on the other scaling parameters
# TODO: The same scaling parameters apply to every
# array in the column so this is currently very slow; we
# really only need to check once whether any scaling will
# be necessary and skip this step if not
# TODO: Test that this works for X format; I don't think
# that it does--the recformat variable only applies to the P
# format not the X format
dummy[idx] = self._convert_other(column, dummy[idx], recformat)
return dummy
def _convert_ascii(self, column, field):
"""
Special handling for ASCII table columns to convert columns containing
numeric types to actual numeric arrays from the string representation.
"""
format = column.format
recformat = getattr(format, "recformat", ASCII2NUMPY[format[0]])
# if the string = TNULL, return ASCIITNULL
nullval = str(column.null).strip().encode("ascii")
if len(nullval) > format.width:
nullval = nullval[: format.width]
# Before using .replace make sure that any trailing bytes in each
# column are filled with spaces, and *not*, say, nulls; this causes
# functions like replace to potentially leave gibberish bytes in the
# array buffer.
dummy = np.char.ljust(field, format.width)
dummy = np.char.replace(dummy, encode_ascii("D"), encode_ascii("E"))
null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))
# Convert all fields equal to the TNULL value (nullval) to empty fields.
# TODO: These fields really should be converted to NaN or something else undefined.
# Currently they are converted to empty fields, which are then set to zero.
dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)
# always replace empty fields, see https://github.com/astropy/astropy/pull/5394
if nullval != b"":
dummy = np.where(np.char.strip(dummy) == b"", null_fill, dummy)
try:
dummy = np.array(dummy, dtype=recformat)
except ValueError as exc:
indx = self.names.index(column.name)
raise ValueError(
"{}; the header may be missing the necessary TNULL{} "
"keyword or the table contains invalid data".format(exc, indx + 1)
)
return dummy
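    # The heart of the conversion above, sketched standalone: Fortran-style
    # 'D' exponents are rewritten to 'E' so that numpy can parse the strings
    # (the sample values are made up).
    #
    #     >>> import numpy as np
    #     >>> field = np.array([b' 1.5D+02', b' 2.0D-01'])
    #     >>> np.array(np.char.replace(field, b'D', b'E'), dtype='f8').tolist()
    #     [150.0, 0.2]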
def _convert_other(self, column, field, recformat):
"""Perform conversions on any other fixed-width column data types.
This may not perform any conversion at all if it's not necessary, in
which case the original column array is returned.
"""
if isinstance(recformat, _FormatX):
# special handling for the X format
return self._convert_x(field, recformat)
(
_str,
_bool,
_number,
_scale,
_zero,
bscale,
bzero,
dim,
) = self._get_scale_factors(column)
indx = self.names.index(column.name)
# ASCII table, convert strings to numbers
# TODO:
# For now, check that these are ASCII columns by checking the coldefs
# type; in the future all columns (for binary tables, ASCII tables, or
# otherwise) should "know" what type they are already and how to handle
# converting their data from FITS format to native format and vice
# versa...
if not _str and isinstance(self._coldefs, _AsciiColDefs):
field = self._convert_ascii(column, field)
# Test that the dimensions given in dim are sensible; otherwise
# display a warning and ignore them
if dim:
# See if the dimensions already match, if not, make sure the
# number items will fit in the specified dimensions
if field.ndim > 1:
actual_shape = field.shape[1:]
if _str:
actual_shape = actual_shape + (field.itemsize,)
else:
actual_shape = field.shape[0]
if dim == actual_shape:
# The array already has the correct dimensions, so we
# ignore dim and don't convert
dim = None
else:
nitems = reduce(operator.mul, dim)
if _str:
actual_nitems = field.itemsize
elif (
len(field.shape) == 1
): # No repeat count in TFORMn, equivalent to 1
actual_nitems = 1
else:
actual_nitems = field.shape[1]
if nitems > actual_nitems:
warnings.warn(
"TDIM{} value {:d} does not fit with the size of "
"the array items ({:d}). TDIM{:d} will be ignored.".format(
indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1
)
)
dim = None
# further conversion for both ASCII and binary tables
# For now we've made columns responsible for *knowing* whether their
# data has been scaled, but we make the FITS_rec class responsible for
# actually doing the scaling
# TODO: This also needs to be fixed in the effort to make Columns
# responsible for scaling their arrays to/from FITS native values
if not column.ascii and column.format.p_format:
format_code = column.format.p_format
else:
# TODO: Rather than having this if/else it might be nice if the
# ColumnFormat class had an attribute guaranteed to give the format
# of actual values in a column regardless of whether the true
# format is something like P or Q
format_code = column.format.format
if _number and (_scale or _zero) and not column._physical_values:
# This is to handle pseudo unsigned ints in table columns
# TODO: For now this only really works correctly for binary tables
# Should it work for ASCII tables as well?
if self._uint:
if bzero == 2**15 and format_code == "I":
field = np.array(field, dtype=np.uint16)
elif bzero == 2**31 and format_code == "J":
field = np.array(field, dtype=np.uint32)
elif bzero == 2**63 and format_code == "K":
field = np.array(field, dtype=np.uint64)
bzero64 = np.uint64(2**63)
else:
field = np.array(field, dtype=np.float64)
else:
field = np.array(field, dtype=np.float64)
if _scale:
np.multiply(field, bscale, field)
if _zero:
if self._uint and format_code == "K":
# There is a chance of overflow, so be careful
test_overflow = field.copy()
try:
test_overflow += bzero64
except OverflowError:
warnings.warn(
"Overflow detected while applying TZERO{:d}. "
"Returning unscaled data.".format(indx + 1)
)
else:
field = test_overflow
else:
field += bzero
# mark the column as scaled
column._physical_values = True
elif _bool and field.dtype != bool:
field = np.equal(field, ord("T"))
elif _str:
if not self._character_as_bytes:
with suppress(UnicodeDecodeError):
field = decode_ascii(field)
if dim:
# Apply the new field item dimensions
nitems = reduce(operator.mul, dim)
if field.ndim > 1:
field = field[:, :nitems]
if _str:
fmt = field.dtype.char
dtype = (f"|{fmt}{dim[-1]}", dim[:-1])
field.dtype = dtype
else:
field.shape = (field.shape[0],) + dim
return field
def _get_heap_data(self):
"""
Returns a pointer into the table's raw data to its heap (if present).
This is returned as a numpy byte array.
"""
if self._heapsize:
raw_data = self._get_raw_data().view(np.ubyte)
heap_end = self._heapoffset + self._heapsize
return raw_data[self._heapoffset : heap_end]
else:
return np.array([], dtype=np.ubyte)
def _get_raw_data(self):
"""
        Returns the base array of self, the "raw data array": the array in
        the format in which it was first read from a file, before it was
        sliced or viewed as a different type in any way.
This is determined by walking through the bases until finding one that
has at least the same number of bytes as self, plus the heapsize. This
may be the immediate .base but is not always. This is used primarily
for variable-length array support which needs to be able to find the
heap (the raw data *may* be larger than nbytes + heapsize if it
contains a gap or padding).
May return ``None`` if no array resembling the "raw data" according to
the stated criteria can be found.
"""
raw_data_bytes = self.nbytes + self._heapsize
base = self
while hasattr(base, "base") and base.base is not None:
base = base.base
# Variable-length-arrays: should take into account the case of
# empty arrays
if hasattr(base, "_heapoffset"):
if hasattr(base, "nbytes") and base.nbytes > raw_data_bytes:
return base
# non variable-length-arrays
else:
if hasattr(base, "nbytes") and base.nbytes >= raw_data_bytes:
return base
def _get_scale_factors(self, column):
"""Get all the scaling flags and factors for one column."""
# TODO: Maybe this should be a method/property on Column? Or maybe
# it's not really needed at all...
_str = column.format.format == "A"
_bool = column.format.format == "L"
_number = not (_bool or _str)
bscale = column.bscale
bzero = column.bzero
_scale = bscale not in ("", None, 1)
_zero = bzero not in ("", None, 0)
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
# column._dims gives a tuple, rather than column.dim which returns the
# original string format code from the FITS header...
dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
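    # Hedged illustration of how these flags feed the scaling step in
    # ``_convert_other``: physical = stored * BSCALE + BZERO. With the
    # conventional BZERO of 2**15, signed 16-bit storage maps onto the
    # unsigned range (the sample values are made up).
    #
    #     >>> import numpy as np
    #     >>> stored = np.array([-32768, 0, 32767], dtype=np.int16)
    #     >>> (stored.astype(np.float64) * 1.0 + 32768.0).tolist()
    #     [0.0, 32768.0, 65535.0]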
def _scale_back(self, update_heap_pointers=True):
"""
Update the parent array, using the (latest) scaled array.
If ``update_heap_pointers`` is `False`, this will leave all the heap
pointers in P/Q columns as they are verbatim--it only makes sense to do
this if there is already data on the heap and it can be guaranteed that
that data has not been modified, and there is not new data to add to
the heap. Currently this is only used as an optimization for
CompImageHDU that does its own handling of the heap.
"""
# Running total for the new heap size
heapsize = 0
for indx, name in enumerate(self.dtype.names):
column = self._coldefs[indx]
recformat = column.format.recformat
raw_field = _get_recarray_field(self, indx)
# add the location offset of the heap area for each
# variable length column
if isinstance(recformat, _FormatP):
# Irritatingly, this can return a different dtype than just
# doing np.dtype(recformat.dtype); but this returns the results
# that we want. For example if recformat.dtype is 'a' we want
# an array of characters.
dtype = np.array([], dtype=recformat.dtype).dtype
if update_heap_pointers and name in self._converted:
# The VLA has potentially been updated, so we need to
# update the array descriptors
raw_field[:] = 0 # reset
npts = [len(arr) for arr in self._converted[name]]
raw_field[: len(npts), 0] = npts
raw_field[1:, 1] = (
np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize
)
raw_field[:, 1][:] += heapsize
heapsize += raw_field[:, 0].sum() * dtype.itemsize
# Even if this VLA has not been read or updated, we need to
# include the size of its constituent arrays in the heap size
# total
if heapsize >= 2**31:
raise ValueError(
"The heapsize limit for 'P' format "
"has been reached. "
"Please consider using the 'Q' format "
"for your file."
)
if isinstance(recformat, _FormatX) and name in self._converted:
_wrapx(self._converted[name], raw_field, recformat.repeat)
continue
(
_str,
_bool,
_number,
_scale,
_zero,
bscale,
bzero,
_,
) = self._get_scale_factors(column)
field = self._converted.get(name, raw_field)
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero) and column._physical_values:
dummy = field.copy()
if _zero:
dummy -= bzero
if _scale:
dummy /= bscale
                    # This will set the raw values in the recarray back to
                    # their non-physical storage values, so the column should
                    # be marked as not scaled
column._physical_values = False
elif _str or isinstance(self._coldefs, _AsciiColDefs):
dummy = field
else:
continue
# ASCII table, convert numbers to strings
if isinstance(self._coldefs, _AsciiColDefs):
self._scale_back_ascii(indx, dummy, raw_field)
# binary table string column
elif isinstance(raw_field, chararray.chararray):
self._scale_back_strings(indx, dummy, raw_field)
# all other binary table columns
else:
if len(raw_field) and isinstance(raw_field[0], np.integer):
dummy = np.around(dummy)
if raw_field.shape == dummy.shape:
raw_field[:] = dummy
else:
# Reshaping the data is necessary in cases where the
# TDIMn keyword was used to shape a column's entries
# into arrays
raw_field[:] = dummy.ravel().view(raw_field.dtype)
del dummy
# ASCII table does not have Boolean type
elif _bool and name in self._converted:
choices = (
np.array([ord("F")], dtype=np.int8)[0],
np.array([ord("T")], dtype=np.int8)[0],
)
raw_field[:] = np.choose(field, choices)
# Store the updated heapsize
self._heapsize = heapsize
def _scale_back_strings(self, col_idx, input_field, output_field):
# There are a few possibilities this has to be able to handle properly
# The input_field, which comes from the _converted column is of dtype
# 'Un' so that elements read out of the array are normal str
# objects (i.e. unicode strings)
#
# At the other end the *output_field* may also be of type 'S' or of
# type 'U'. It will *usually* be of type 'S' because when reading
# an existing FITS table the raw data is just ASCII strings, and
# represented in Numpy as an S array. However, when a user creates
# a new table from scratch, they *might* pass in a column containing
# unicode strings (dtype 'U'). Therefore the output_field of the
# raw array is actually a unicode array. But we still want to make
# sure the data is encodable as ASCII. Later when we write out the
# array we use, in the dtype 'U' case, a different write routine
# that writes row by row and encodes any 'U' columns to ASCII.
# If the output_field is non-ASCII we will worry about ASCII encoding
# later when writing; otherwise we can do it right here
if input_field.dtype.kind == "U" and output_field.dtype.kind == "S":
try:
_ascii_encode(input_field, out=output_field)
except _UnicodeArrayEncodeError as exc:
raise ValueError(
"Could not save column '{}': Contains characters that "
"cannot be encoded as ASCII as required by FITS, starting "
"at the index {!r} of the column, and the index {} of "
"the string at that location.".format(
self._coldefs[col_idx].name,
exc.index[0] if len(exc.index) == 1 else exc.index,
exc.start,
)
)
else:
# Otherwise go ahead and do a direct copy into--if both are type
# 'U' we'll handle encoding later
input_field = input_field.flatten().view(output_field.dtype)
output_field.flat[:] = input_field
# Ensure that blanks at the end of each string are
# converted to nulls instead of spaces, see Trac #15
# and #111
_rstrip_inplace(output_field)
def _scale_back_ascii(self, col_idx, input_field, output_field):
"""
Convert internal array values back to ASCII table representation.
The ``input_field`` is the internal representation of the values, and
the ``output_field`` is the character array representing the ASCII
output that will be written.
"""
starts = self._coldefs.starts[:]
spans = self._coldefs.spans
format = self._coldefs[col_idx].format
        # The index of the "end" column of the record, beyond
        # which we can't write
end = super().field(-1).itemsize
starts.append(end + starts[-1])
if col_idx > 0:
lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]
else:
lead = 0
if lead < 0:
warnings.warn(
"Column {!r} starting point overlaps the previous column.".format(
col_idx + 1
)
)
trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]
if trail < 0:
warnings.warn(
f"Column {col_idx + 1!r} ending point overlaps the next column."
)
# TODO: It would be nice if these string column formatting
# details were left to a specialized class, as is the case
# with FormatX and FormatP
if "A" in format:
_pc = "{:"
else:
_pc = "{:>"
fmt = "".join([_pc, format[1:], ASCII2STR[format[0]], "}", (" " * trail)])
# Even if the format precision is 0, we should output a decimal point
# as long as there is space to do so--not including a decimal point in
# a float value is discouraged by the FITS Standard
trailing_decimal = format.precision == 0 and format.format in ("F", "E", "D")
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for jdx, value in enumerate(input_field):
value = fmt.format(value)
if len(value) > starts[col_idx + 1] - starts[col_idx]:
raise ValueError(
"Value {!r} does not fit into the output's itemsize of {}.".format(
value, spans[col_idx]
)
)
if trailing_decimal and value[0] == " ":
# We have some extra space in the field for the trailing
# decimal point
value = value[1:] + "."
output_field[jdx] = value
# Replace exponent separator in floating point numbers
if "D" in format:
output_field[:] = output_field.replace(b"E", b"D")
def tolist(self):
# Override .tolist to take care of special case of VLF
column_lists = [self[name].tolist() for name in self.columns.names]
return [list(row) for row in zip(*column_lists)]
def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if field.dtype.char in ("S", "U") and not isinstance(field, chararray.chararray):
field = field.view(chararray.chararray)
return field
class _UnicodeArrayEncodeError(UnicodeEncodeError):
def __init__(self, encoding, object_, start, end, reason, index):
super().__init__(encoding, object_, start, end, reason)
self.index = index
def _ascii_encode(inarray, out=None):
"""
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
This is like an inplace version of `np.char.encode` though simpler since
it's only limited to ASCII, and hence the size of each character is
guaranteed to be 1 byte.
    If any strings are non-ASCII, a `_UnicodeArrayEncodeError` is raised--this is
just a `UnicodeEncodeError` with an additional attribute for the index of
the item that couldn't be encoded.
"""
out_dtype = np.dtype((f"S{inarray.dtype.itemsize // 4}", inarray.dtype.shape))
if out is not None:
out = out.view(out_dtype)
op_dtypes = [inarray.dtype, out_dtype]
op_flags = [["readonly"], ["writeonly", "allocate"]]
it = np.nditer(
[inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=["zerosize_ok"]
)
try:
for initem, outitem in it:
outitem[...] = initem.item().encode("ascii")
except UnicodeEncodeError as exc:
index = np.unravel_index(it.iterindex, inarray.shape)
raise _UnicodeArrayEncodeError(*(exc.args + (index,)))
return it.operands[1]
def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == "U" for d in dtypes)
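# Illustrative sketch of ``_has_unicode_fields`` on made-up structured arrays:
#
#     >>> import numpy as np
#     >>> _has_unicode_fields(np.zeros(2, dtype=[('a', 'U4'), ('b', 'f8')]))
#     True
#     >>> _has_unicode_fields(np.zeros(2, dtype=[('a', 'S4')]))
#     False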
|
e790ca2dc158b7b9fac25a32f64e3fdfab43fa1029c23a2af030af0143cff576 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, MaskedColumn
from astropy.table.column import col_copy
from astropy.time import Time, TimeDelta
from astropy.time.core import BARYCENTRIC_SCALES
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.utils.exceptions import AstropyUserWarning
from . import Card, Header
# The following is based on the FITS WCS Paper IV, "Representations of time
# coordinates in FITS".
# https://ui.adsabs.harvard.edu/abs/2015A%26A...574A..36R
# FITS WCS standard specified "4-3" form for non-linear coordinate types
TCTYP_RE_TYPE = re.compile(r"(?P<type>[A-Z]+)[-]+")
TCTYP_RE_ALGO = re.compile(r"(?P<algo>[A-Z]+)\s*")
# FITS Time standard specified time units
FITS_TIME_UNIT = ["s", "d", "a", "cy", "min", "h", "yr", "ta", "Ba"]
# Global time reference coordinate keywords
TIME_KEYWORDS = (
"TIMESYS",
"MJDREF",
"JDREF",
"DATEREF",
"TREFPOS",
"TREFDIR",
"TIMEUNIT",
"TIMEOFFS",
"OBSGEO-X",
"OBSGEO-Y",
"OBSGEO-Z",
"OBSGEO-L",
"OBSGEO-B",
"OBSGEO-H",
"DATE",
"DATE-OBS",
"DATE-AVG",
"DATE-BEG",
"DATE-END",
"MJD-OBS",
"MJD-AVG",
"MJD-BEG",
"MJD-END",
)
# Column-specific time override keywords
COLUMN_TIME_KEYWORDS = ("TCTYP", "TCUNI", "TRPOS")
# Column-specific keywords regex
COLUMN_TIME_KEYWORD_REGEXP = f"({'|'.join(COLUMN_TIME_KEYWORDS)})[0-9]+"
def is_time_column_keyword(keyword):
"""
Check if the FITS header keyword is a time column-specific keyword.
Parameters
----------
keyword : str
FITS keyword.
"""
return re.match(COLUMN_TIME_KEYWORD_REGEXP, keyword) is not None
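# Illustrative usage:
#
#     >>> is_time_column_keyword('TCTYP2')
#     True
#     >>> is_time_column_keyword('TTYPE2')
#     False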
# Set astropy time global information
GLOBAL_TIME_INFO = {
"TIMESYS": ("UTC", "Default time scale"),
"JDREF": (0.0, "Time columns are jd = jd1 + jd2"),
"TREFPOS": ("TOPOCENTER", "Time reference position"),
}
def _verify_global_info(global_info):
"""
Given the global time reference frame information, verify that
each global time coordinate attribute will be given a valid value.
Parameters
----------
global_info : dict
Global time reference frame information.
"""
# Translate FITS deprecated scale into astropy scale, or else just convert
# to lower case for further checks.
global_info["scale"] = FITS_DEPRECATED_SCALES.get(
global_info["TIMESYS"], global_info["TIMESYS"].lower()
)
# Verify global time scale
if global_info["scale"] not in Time.SCALES:
# 'GPS' and 'LOCAL' are FITS recognized time scale values
# but are not supported by astropy.
if global_info["scale"] == "gps":
warnings.warn(
"Global time scale (TIMESYS) has a FITS recognized time scale "
'value "GPS". In Astropy, "GPS" is a time from epoch format '
"which runs synchronously with TAI; GPS is approximately 19 s "
"ahead of TAI. Hence, this format will be used.",
AstropyUserWarning,
)
# Assume that the values are in GPS format
global_info["scale"] = "tai"
global_info["format"] = "gps"
if global_info["scale"] == "local":
warnings.warn(
"Global time scale (TIMESYS) has a FITS recognized time scale "
'value "LOCAL". However, the standard states that "LOCAL" should be '
"tied to one of the existing scales because it is intrinsically "
"unreliable and/or ill-defined. Astropy will thus use the default "
'global time scale "UTC" instead of "LOCAL".',
AstropyUserWarning,
)
# Default scale 'UTC'
global_info["scale"] = "utc"
global_info["format"] = None
else:
raise AssertionError(
"Global time scale (TIMESYS) should have a FITS recognized "
"time scale value (got {!r}). The FITS standard states that "
"the use of local time scales should be restricted to alternate "
"coordinates.".format(global_info["TIMESYS"])
)
else:
# Scale is already set
global_info["format"] = None
# Check if geocentric global location is specified
obs_geo = [
global_info[attr]
for attr in ("OBSGEO-X", "OBSGEO-Y", "OBSGEO-Z")
if attr in global_info
]
# Location full specification is (X, Y, Z)
if len(obs_geo) == 3:
global_info["location"] = EarthLocation.from_geocentric(*obs_geo, unit=u.m)
else:
# Check if geodetic global location is specified (since geocentric failed)
# First warn the user if geocentric location is partially specified
if obs_geo:
warnings.warn(
"The geocentric observatory location {} is not completely "
"specified (X, Y, Z) and will be ignored.".format(obs_geo),
AstropyUserWarning,
)
# Check geodetic location
obs_geo = [
global_info[attr]
for attr in ("OBSGEO-L", "OBSGEO-B", "OBSGEO-H")
if attr in global_info
]
if len(obs_geo) == 3:
global_info["location"] = EarthLocation.from_geodetic(*obs_geo)
else:
# Since both geocentric and geodetic locations are not specified,
# location will be None.
# Warn the user if geodetic location is partially specified
if obs_geo:
warnings.warn(
"The geodetic observatory location {} is not completely "
"specified (lon, lat, alt) and will be ignored.".format(obs_geo),
AstropyUserWarning,
)
global_info["location"] = None
# Get global time reference
# Keywords are listed in order of precedence, as stated by the standard
for key, format_ in (("MJDREF", "mjd"), ("JDREF", "jd"), ("DATEREF", "fits")):
if key in global_info:
global_info["ref_time"] = {"val": global_info[key], "format": format_}
break
else:
# If none of the three keywords is present, MJDREF = 0.0 must be assumed
global_info["ref_time"] = {"val": 0, "format": "mjd"}
def _verify_column_info(column_info, global_info):
"""
Given the column-specific time reference frame information, verify that
each column-specific time coordinate attribute has a valid value.
Return True if the coordinate column is time, or else return False.
Parameters
----------
global_info : dict
Global time reference frame information.
column_info : dict
Column-specific time reference frame override information.
"""
scale = column_info.get("TCTYP", None)
unit = column_info.get("TCUNI", None)
location = column_info.get("TRPOS", None)
if scale is not None:
# Non-linear coordinate types have "4-3" form and are not time coordinates
if TCTYP_RE_TYPE.match(scale[:5]) and TCTYP_RE_ALGO.match(scale[5:]):
return False
elif scale.lower() in Time.SCALES:
column_info["scale"] = scale.lower()
column_info["format"] = None
elif scale in FITS_DEPRECATED_SCALES.keys():
column_info["scale"] = FITS_DEPRECATED_SCALES[scale]
column_info["format"] = None
# TCTYPn (scale) = 'TIME' indicates that the column scale is
# controlled by the global scale.
elif scale == "TIME":
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
elif scale == "GPS":
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "GPS". '
'In Astropy, "GPS" is a time from epoch format which runs '
"synchronously with TAI; GPS runs ahead of TAI approximately "
"by 19 s. Hence, this format will be used.".format(column_info),
AstropyUserWarning,
)
column_info["scale"] = "tai"
column_info["format"] = "gps"
elif scale == "LOCAL":
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "LOCAL". '
'However, the standard states that "LOCAL" should be tied to one '
"of the existing scales because it is intrinsically unreliable "
"and/or ill-defined. Astropy will thus use the global time scale "
"(TIMESYS) as the default.".format(column_info),
AstropyUserWarning,
)
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
else:
# Coordinate type is either an unrecognized local time scale
# or a linear coordinate type
return False
# If TCUNIn is a time unit or TRPOSn is specified, the column is a time
# coordinate. This has to be tested since TCTYP (scale) is not specified.
elif (unit is not None and unit in FITS_TIME_UNIT) or location is not None:
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
# None of the conditions for time coordinate columns is satisfied
else:
return False
# Check if column-specific reference position TRPOSn is specified
if location is not None:
# Observatory position (location) needs to be specified only
# for 'TOPOCENTER'.
if location == "TOPOCENTER":
column_info["location"] = global_info["location"]
if column_info["location"] is None:
warnings.warn(
'Time column reference position "TRPOSn" value is "TOPOCENTER". '
"However, the observatory position is not properly specified. "
"The FITS standard does not support this and hence reference "
"position will be ignored.",
AstropyUserWarning,
)
else:
column_info["location"] = None
# Warn user about ignoring global reference position when TRPOSn is
# not specified
elif global_info["TREFPOS"] == "TOPOCENTER":
if global_info["location"] is not None:
warnings.warn(
'Time column reference position "TRPOSn" is not specified. The '
'default value for it is "TOPOCENTER", and the observatory position '
"has been specified. However, for supporting column-specific location, "
"reference position will be ignored for this column.",
AstropyUserWarning,
)
column_info["location"] = None
else:
column_info["location"] = None
# Get reference time
column_info["ref_time"] = global_info["ref_time"]
return True
def _get_info_if_time_column(col, global_info):
"""
Check if a column without corresponding time column keywords in the
FITS header represents time or not. If yes, return the time column
information needed for its conversion to Time.
This is only applicable to the special-case where a column has the
name 'TIME' and a time unit.
"""
# Column with TTYPEn = 'TIME' and lacking any TC*n or time
# specific keywords will be controlled by the global keywords.
if col.info.name.upper() == "TIME" and col.info.unit in FITS_TIME_UNIT:
column_info = {
"scale": global_info["scale"],
"format": global_info["format"],
"ref_time": global_info["ref_time"],
"location": None,
}
if global_info["TREFPOS"] == "TOPOCENTER":
column_info["location"] = global_info["location"]
if column_info["location"] is None:
warnings.warn(
'Time column "{}" reference position will be ignored '
"due to unspecified observatory position.".format(col.info.name),
AstropyUserWarning,
)
return column_info
return None
def _convert_global_time(table, global_info):
"""
Convert the table metadata for time informational keywords
to astropy Time.
Parameters
----------
table : `~astropy.table.Table`
The table whose time metadata is to be converted.
global_info : dict
Global time reference frame information.
"""
# Read in Global Informational keywords as Time
for key, value in global_info.items():
# FITS uses a subset of ISO-8601 for DATE-xxx
if key not in table.meta:
try:
table.meta[key] = _convert_time_key(global_info, key)
except ValueError:
pass
def _convert_time_key(global_info, key):
"""
Convert a time metadata key to a Time object.
Parameters
----------
global_info : dict
Global time reference frame information.
key : str
Time key.
Returns
-------
astropy.time.Time
Raises
------
ValueError
If key is not a valid global time keyword.
"""
value = global_info[key]
if key.startswith("DATE"):
scale = "utc" if key == "DATE" else global_info["scale"]
precision = len(value.split(".")[-1]) if "." in value else 0
return Time(value, format="fits", scale=scale, precision=precision)
# MJD-xxx in MJD according to TIMESYS
elif key.startswith("MJD-"):
return Time(value, format="mjd", scale=global_info["scale"])
else:
raise ValueError("Key is not a valid global time keyword")
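# Illustrative sketch (made-up values): a DATE-xxx keyword becomes a Time in
# the 'fits' format, with precision inferred from the fractional seconds.
#
#     >>> gi = {'scale': 'tt', 'DATE-OBS': '2021-01-01T00:00:00.000'}
#     >>> t = _convert_time_key(gi, 'DATE-OBS')
#     >>> t.scale, t.precision
#     ('tt', 3)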
def _convert_time_column(col, column_info):
"""
Convert time columns to astropy Time columns.
Parameters
----------
col : `~astropy.table.Column`
The time coordinate column to be converted to Time.
column_info : dict
Column-specific time reference frame override information.
"""
# The code might fail while attempting to read FITS files not written by astropy.
try:
# ISO-8601 is the only string representation of time in FITS
if col.info.dtype.kind in ["S", "U"]:
# [+/-C]CCYY-MM-DD[Thh:mm:ss[.s...]] where the number of characters
# from index 20 to the end of string represents the precision
precision = max(int(col.info.dtype.str[2:]) - 20, 0)
return Time(
col,
format="fits",
scale=column_info["scale"],
precision=precision,
location=column_info["location"],
)
if column_info["format"] == "gps":
return Time(col, format="gps", location=column_info["location"])
# If reference value is 0 for JD or MJD, the column values can be
# directly converted to Time, as they are absolute (relative
# to a globally accepted zero point).
if column_info["ref_time"]["val"] == 0 and column_info["ref_time"][
"format"
] in ["jd", "mjd"]:
# (jd1, jd2) where jd = jd1 + jd2
if col.shape[-1] == 2 and col.ndim > 1:
return Time(
col[..., 0],
col[..., 1],
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
else:
return Time(
col,
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
# Reference time
ref_time = Time(
column_info["ref_time"]["val"],
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
# Elapsed time since reference time
if col.shape[-1] == 2 and col.ndim > 1:
delta_time = TimeDelta(col[..., 0], col[..., 1])
else:
delta_time = TimeDelta(col)
return ref_time + delta_time
except Exception as err:
warnings.warn(
'The exception "{}" was encountered while trying to convert the time '
'column "{}" to Astropy Time.'.format(err, col.info.name),
AstropyUserWarning,
)
return col
def fits_to_time(hdr, table):
"""
Read FITS binary table time columns as `~astropy.time.Time`.
This method reads the metadata associated with time coordinates, as
stored in a FITS binary table header, converts time columns into
`~astropy.time.Time` columns and reads global reference times as
`~astropy.time.Time` instances.
Parameters
----------
hdr : `~astropy.io.fits.header.Header`
FITS Header
table : `~astropy.table.Table`
The table whose time columns are to be read as Time
Returns
-------
hdr : `~astropy.io.fits.header.Header`
Modified FITS Header (time metadata removed)
"""
# Set defaults for global time scale, reference, etc.
global_info = {"TIMESYS": "UTC", "TREFPOS": "TOPOCENTER"}
# Set default dictionary for time columns
time_columns = defaultdict(OrderedDict)
# Make a "copy" (not just a view) of the input header, since it
    # may get modified. The data is still a "view" (for now).
hcopy = hdr.copy(strip=True)
# Scan the header for global and column-specific time keywords
for key, value, comment in hdr.cards:
if key in TIME_KEYWORDS:
global_info[key] = value
hcopy.remove(key)
elif is_time_column_keyword(key):
base, idx = re.match(r"([A-Z]+)([0-9]+)", key).groups()
time_columns[int(idx)][base] = value
hcopy.remove(key)
elif value in ("OBSGEO-X", "OBSGEO-Y", "OBSGEO-Z") and re.match(
"TTYPE[0-9]+", key
):
global_info[value] = table[value]
# Verify and get the global time reference frame information
_verify_global_info(global_info)
_convert_global_time(table, global_info)
# Columns with column-specific time (coordinate) keywords
if time_columns:
for idx, column_info in time_columns.items():
# Check if the column is time coordinate (not spatial)
if _verify_column_info(column_info, global_info):
colname = table.colnames[idx - 1]
# Convert to Time
table[colname] = _convert_time_column(table[colname], column_info)
# Check for special-cases of time coordinate columns
for idx, colname in enumerate(table.colnames):
if (idx + 1) not in time_columns:
column_info = _get_info_if_time_column(table[colname], global_info)
if column_info:
table[colname] = _convert_time_column(table[colname], column_info)
return hcopy
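# Typical call pattern (a sketch; 'example_times.fits' is a hypothetical file):
#
#     >>> from astropy.io import fits
#     >>> from astropy.table import Table
#     >>> with fits.open('example_times.fits') as hdul:
#     ...     tbl = Table(hdul[1].data)
#     ...     remaining_hdr = fits_to_time(hdul[1].header, tbl)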
def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high-precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
# Make a light copy of table (to the extent possible) and clear any indices along
# the way. Indices are not serialized and cause problems later, but they are not
# needed here so just drop. For Column subclasses take advantage of copy() method,
# but for others it is required to actually copy the data if there are attached
# indices. See #8077 and #9009 for further discussion.
new_cols = []
for col in table.itercols():
if isinstance(col, Column):
new_col = col.copy(copy_data=False) # Also drops any indices
else:
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
newtable = table.__class__(new_cols, copy=False)
newtable.meta = table.meta
# Global time coordinate frame keywords
hdr = Header(
[
Card(keyword=key, value=val[0], comment=val[1])
for key, val in GLOBAL_TIME_INFO.items()
]
)
# Store coordinate column-specific metadata
newtable.meta["__coordinate_columns__"] = defaultdict(OrderedDict)
coord_meta = newtable.meta["__coordinate_columns__"]
time_cols = table.columns.isinstance(Time)
# Geocentric location
location = None
for col in time_cols:
# By default, Time objects are written in full precision, i.e. we store both
# jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for
# Time can be stored if the user explicitly chooses to do so.
col_cls = MaskedColumn if col.masked else Column
if col.info.serialize_method["fits"] == "formatted_value":
newtable.replace_column(col.info.name, col_cls(col.value))
continue
# The following is necessary to deal with multi-dimensional ``Time`` objects
# (i.e. where Time.shape is non-trivial).
jd12 = np.stack([col.jd1, col.jd2], axis=-1)
        # jd12 now has shape col.shape + (2,): the (jd1, jd2) pair forms the
        # last (innermost) axis.
newtable.replace_column(col.info.name, col_cls(jd12, unit="d"))
# Time column-specific override keywords
coord_meta[col.info.name]["coord_type"] = col.scale.upper()
coord_meta[col.info.name]["coord_unit"] = "d"
# Time column reference position
if getattr(col, "location") is None:
coord_meta[col.info.name]["time_ref_pos"] = None
if location is not None:
warnings.warn(
'Time Column "{}" has no specified location, but global Time '
"Position is present, which will be the default for this column "
"in FITS specification.".format(col.info.name),
AstropyUserWarning,
)
else:
coord_meta[col.info.name]["time_ref_pos"] = "TOPOCENTER"
# Compatibility of Time Scales and Reference Positions
if col.scale in BARYCENTRIC_SCALES:
warnings.warn(
'Earth Location "TOPOCENTER" for Time Column "{}" is incompatible '
'with scale "{}".'.format(col.info.name, col.scale.upper()),
AstropyUserWarning,
)
if location is None:
# Set global geocentric location
location = col.location
if location.size > 1:
for dim in ("x", "y", "z"):
newtable.add_column(
Column(getattr(location, dim).to_value(u.m)),
name=f"OBSGEO-{dim.upper()}",
)
else:
hdr.extend(
[
Card(
keyword=f"OBSGEO-{dim.upper()}",
value=getattr(location, dim).to_value(u.m),
)
for dim in ("x", "y", "z")
]
)
elif np.any(location != col.location):
raise ValueError(
"Multiple Time Columns with different geocentric "
"observatory locations ({}, {}) encountered."
"This is not supported by the FITS standard.".format(
location, col.location
)
)
return newtable, hdr
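# Round-trip sketch (made-up values): the Time column is replaced by
# (jd1, jd2) pairs and the returned header carries the global time keywords.
#
#     >>> from astropy.table import Table
#     >>> from astropy.time import Time
#     >>> t = Table({'obs': Time(['2021-01-01', '2021-01-02'], scale='tt')})
#     >>> newtable, hdr = time_to_fits(t)
#     >>> newtable['obs'].shape, hdr['TIMESYS']
#     ((2, 2), 'UTC')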
|
0ac99c35538d0497c9f496c0954493fc74f3ded4566598c4a327878ac59f4767 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
Convenience functions
=====================
The functions in this module provide shortcuts for some of the most basic
operations on FITS files, such as reading and updating the header. They are
included directly in the 'astropy.io.fits' namespace so that they can be used
like::
astropy.io.fits.getheader(...)
These functions are primarily for convenience when working with FITS files in
the command-line interpreter. If performing several operations on the same
file, such as in a script, it is better to *not* use these functions, as each
one must open and re-parse the file. In such cases it is better to use
:func:`astropy.io.fits.open` and work directly with the
:class:`astropy.io.fits.HDUList` object and underlying HDU objects.
Several of the convenience functions, such as `getheader` and `getdata` support
special arguments for selecting which HDU to use when working with a
multi-extension FITS file. There are a few supported argument formats for
selecting the HDU. See the documentation for `getdata` for an
explanation of all the different formats.
.. warning::
All arguments to convenience functions other than the filename that are
*not* for selecting the HDU should be passed in as keyword
arguments. This is to avoid ambiguity and conflicts with the
HDU arguments. For example, to set NAXIS=1 on the Primary HDU:
Wrong::
astropy.io.fits.setval('myimage.fits', 'NAXIS', 1)
The above example will try to set the NAXIS value on the first extension
HDU to blank. That is, the argument '1' is assumed to specify an
    HDU number, not the keyword value.
Right::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1)
This will set the NAXIS keyword to 1 on the primary HDU (the default). To
specify the first extension HDU use::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1)
This complexity arises out of the attempt to simultaneously support
multiple argument formats that were used in past versions of PyFITS.
Unfortunately, it is not possible to support all formats without
introducing some ambiguity. A future Astropy release may standardize
around a single format and officially deprecate the other formats.
"""
import operator
import os
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .diff import FITSDiff, HDUDiff
from .file import FILE_MODES, _File
from .hdu.base import _BaseHDU, _ValidHDU
from .hdu.hdulist import HDUList, fitsopen
from .hdu.image import ImageHDU, PrimaryHDU
from .hdu.table import BinTableHDU
from .header import Header
from .util import (
_is_dask_array,
_is_int,
fileobj_closed,
fileobj_mode,
fileobj_name,
path_like,
)
__all__ = [
"getheader",
"getdata",
"getval",
"setval",
"delval",
"writeto",
"append",
"update",
"info",
"tabledump",
"tableload",
"table_to_hdu",
"printdiff",
]
def getheader(filename, *args, **kwargs):
"""
Get the header from an HDU of a FITS file.
Parameters
----------
filename : path-like or file-like
File to get header from. If an opened file object, its mode
        must be one of the following: rb, rb+, or ab+.
ext, extname, extver
The rest of the arguments are for HDU specification. See the
`getdata` documentation for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
header = hdu.header
finally:
hdulist.close(closed=closed)
return header
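# Example (a sketch; 'example.fits' is a hypothetical file):
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.getheader('example.fits')         # primary HDU header
#     >>> hdr = fits.getheader('example.fits', ext=1)  # first extension HDU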
def getdata(filename, *args, header=None, lower=None, upper=None, view=None, **kwargs):
"""
Get the data from an HDU of a FITS file (and optionally the
header).
Parameters
----------
filename : path-like or file-like
File to get data from. If opened, mode must be one of the
        following: rb, rb+, or ab+.
ext
The rest of the arguments are for HDU specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary HDU::
getdata('in.fits')
.. note::
Exclusive to ``getdata``: if ``ext`` is not specified
and primary header contains no data, ``getdata`` attempts
to retrieve data from first extension HDU.
By HDU number::
getdata('in.fits', 0) # the primary HDU
getdata('in.fits', 2) # the second extension HDU
getdata('in.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
        Note that ``EXTNAME`` values are not case sensitive.
        By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : ndarray or `~numpy.recarray` or `~astropy.io.fits.Group`
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
Raises
------
IndexError
If no data is found in searched HDUs.
"""
mode, closed = _get_file_mode(filename)
ext = kwargs.get("ext")
extname = kwargs.get("extname")
extver = kwargs.get("extver")
ext_given = not (
len(args) == 0 and ext is None and extname is None and extver is None
)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
data = hdu.data
if data is None:
if ext_given:
raise IndexError(f"No data in HDU #{extidx}.")
# fallback to the first extension HDU
if len(hdulist) == 1:
raise IndexError("No data in Primary HDU and no extension HDU found.")
hdu = hdulist[1]
data = hdu.data
if data is None:
raise IndexError("No data in either Primary or first extension HDUs.")
if header:
hdr = hdu.header
finally:
hdulist.close(closed=closed)
# Change case of names if requested
trans = None
if lower:
trans = operator.methodcaller("lower")
elif upper:
trans = operator.methodcaller("upper")
    if trans:
        if data.dtype.names is None or data.dtype.descr[0][0] == "":
            # this data does not have fields, so there is nothing to rename;
            # fall through and return the data unchanged rather than None
            pass
        else:
            data.dtype.names = [trans(n) for n in data.dtype.names]
# allow different views into the underlying ndarray. Keep the original
# view just in case there is a problem
if isinstance(view, type) and issubclass(view, np.ndarray):
data = data.view(view)
if header:
return data, hdr
else:
return data
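# Example (a sketch; 'example.fits' and its extensions are hypothetical):
#
#     >>> from astropy.io import fits
#     >>> data = fits.getdata('example.fits', 'SCI', 2)  # EXTNAME='SCI', EXTVER=2
#     >>> data, hdr = fits.getdata('example.fits', ext=1, header=True)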
def getval(filename, keyword, *args, **kwargs):
"""
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object (if opened, mode must be
        one of the following: rb, rb+, or ab+).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
Returns
-------
keyword value : str, int, or float
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
hdr = getheader(filename, *args, **kwargs)
return hdr[keyword]
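# Example (a sketch; file and keyword are hypothetical):
#
#     >>> from astropy.io import fits
#     >>> naxis = fits.getval('example.fits', 'NAXIS', ext=1)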
def setval(
filename,
keyword,
*args,
value=None,
comment=None,
before=None,
after=None,
savecomment=False,
**kwargs,
):
"""
Set a keyword's value from a header in a FITS file.
    If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : path-like or file-like
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
        argument ``savecomment`` takes precedence over ``comment`` if both are
        specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, "update", *args, **kwargs)
try:
if keyword in hdulist[extidx].header and savecomment:
comment = None
hdulist[extidx].header.set(keyword, value, comment, before, after)
finally:
hdulist.close(closed=closed)
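# Example (a sketch; the file and keyword values are hypothetical):
#
#     >>> from astropy.io import fits
#     >>> fits.setval('example.fits', 'OBSERVER', value='E. Hubble',
#     ...             comment='Observer name', ext=0)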
def delval(filename, keyword, *args, **kwargs):
"""
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, "update", *args, **kwargs)
try:
del hdulist[extidx].header[keyword]
finally:
hdulist.close(closed=closed)
def writeto(
filename,
data,
header=None,
output_verify="exception",
overwrite=False,
checksum=False,
):
"""
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened in a writable binary
mode such as 'wb' or 'ab+'.
data : array or `~numpy.recarray` or `~astropy.io.fits.Group`
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
        ``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See
:ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
        headers of all HDUs written to the file.
"""
hdu = _makehdu(data, header)
if hdu.is_image and not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
hdu.writeto(
filename, overwrite=overwrite, output_verify=output_verify, checksum=checksum
)
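# Example (a sketch; the output filename is hypothetical):
#
#     >>> import numpy as np
#     >>> from astropy.io import fits
#     >>> fits.writeto('new.fits', np.zeros((10, 10)), overwrite=True)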
def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .column import python_to_tdisp
from .connect import REMOVE_KEYWORDS, is_column_keyword
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.units import Quantity
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names}"
)
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
tarray = table.as_array()
if isinstance(tarray, np.ma.MaskedArray):
# Fill masked values carefully:
# float column's default mask value needs to be Nan and
# string column's default mask should be an empty string.
# Note: getting the fill value for the structured array is
# more reliable than for individual columns for string entries.
# (no 'N/A' for a single-element string, where it should be 'N').
default_fill_value = np.ma.default_fill_value(tarray.dtype)
for colname, (coldtype, _) in tarray.dtype.fields.items():
if np.all(tarray.fill_value[colname] == default_fill_value[colname]):
# Since multi-element columns with dtypes such as '2f8' have
# a subdtype, we should look up the type of column on that.
coltype = (
coldtype.subdtype[0].type if coldtype.subdtype else coldtype.type
)
if issubclass(coltype, np.complexfloating):
tarray.fill_value[colname] = complex(np.nan, np.nan)
elif issubclass(coltype, np.inexact):
tarray.fill_value[colname] = np.nan
elif issubclass(coltype, np.character):
tarray.fill_value[colname] = ""
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(
tarray.filled(), header=hdr, character_as_bytes=character_as_bytes
)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# with non-default fill values in FITS (if at all possible).
int_formats = ("B", "I", "J", "K")
if not (col.format in int_formats or col.format.p_format in int_formats):
continue
fill_value = tarray[col.name].fill_value
col.null = fill_value.astype(int)
else:
table_hdu = BinTableHDU.from_columns(
tarray, header=hdr, character_as_bytes=character_as_bytes
)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(
table[col.name].info.format, logical_dtype=logical
)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
# Local imports to avoid importing units when it is not required,
# e.g. for command-line scripts
from astropy.units import Unit
from astropy.units.format.fits import UnitScaleError
try:
col.unit = unit.to_string(format="fits")
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
f"The column '{col.name}' could not be stored in FITS "
f"format because it has a scale '({str(scale)})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units."
)
except ValueError:
# Warn that the unit is lost, but let the details depend on
# whether the column was serialized (because it was a
# quantity), since then the unit can be recovered by astropy.
warning = (
f"The unit '{unit.to_string()}' could not be saved in "
"native FITS format "
)
if any(
"SerializedColumn" in item and "name: " + col.name in item
for item in table.meta.get("comments", [])
):
warning += (
"and hence will be lost to non-astropy fits readers. "
"Within astropy, the unit can roundtrip using QTable, "
"though one has to enable the unit before reading."
)
else:
warning += (
"and cannot be recovered in reading. It can roundtrip "
"within astropy by using QTable both to write and read "
"back, though one has to enable the unit before reading."
)
warnings.warn(warning, AstropyUserWarning)
else:
# Try creating a Unit to issue a warning if the unit is not
# FITS compliant
Unit(col.unit, format="fits", parse_strict="warn")
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop("__coordinate_columns__", {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set these, even if we have no data.
for attr in "coord_type", "coord_unit":
setattr(col, attr, col_info.get(attr, None))
trpos = col_info.get("time_ref_pos", None)
if trpos is not None:
setattr(col, "time_ref_pos", trpos)
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
f"Meta-data keyword {key} will be ignored since it conflicts "
"with a FITS reserved keyword",
AstropyUserWarning,
)
continue
# Convert to FITS format
if key == "comments":
key = "comment"
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
return table_hdu
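# Example (a sketch; the output path is hypothetical):
#
#     >>> from astropy.io import fits
#     >>> from astropy.table import Table
#     >>> hdu = fits.table_to_hdu(Table({'a': [1.0, 2.0], 'b': ['x', 'y']}))
#     >>> hdu.writeto('table.fits', overwrite=True)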
def append(filename, data, header=None, checksum=False, verify=True, **kwargs):
"""
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, :class:`~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for appending.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
**kwargs
Additional arguments are passed to:
- `~astropy.io.fits.writeto` if the file does not exist or is empty.
In this case ``output_verify`` is the only possible argument.
- `~astropy.io.fits.open` if ``verify`` is True or if ``filename``
is a file object.
- Otherwise no additional arguments can be used.
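    Examples
    --------
    A minimal sketch; the file name is hypothetical::
        >>> import numpy as np
        >>> from astropy.io import fits
        >>> fits.append('stack.fits', np.zeros((4, 4)))  # doctest: +SKIP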
"""
if isinstance(filename, path_like):
filename = os.path.expanduser(filename)
name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)
if noexist_or_empty:
        # The input file or file-like object either doesn't exist or is
        # empty. Use the writeto convenience function to write the
        # output to the empty object.
writeto(filename, data, header, checksum=checksum, **kwargs)
else:
hdu = _makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
if verify or not closed:
f = fitsopen(filename, mode="append", **kwargs)
try:
f.append(hdu)
# Set a flag in the HDU so that only this HDU gets a checksum
# when writing the file.
hdu._output_checksum = checksum
finally:
f.close(closed=closed)
else:
f = _File(filename, mode="append")
try:
hdu._output_checksum = checksum
hdu._writeto(f)
finally:
f.close()
def update(filename, data, *args, **kwargs):
"""
Update the specified HDU with the input data/header.
Parameters
----------
filename : path-like or file-like
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, `~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for updating.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
HDU specification(s). Header and HDU specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension HDU
update(file, dat, hdr, 3) # update the 3rd extension HDU
update(file, dat, 'sci', 2) # update the 2nd extension HDU named 'sci'
update(file, dat, 3, header=hdr) # update the 3rd extension HDU
update(file, dat, header=hdr, ext=5) # update the 5th extension HDU
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
# The arguments to this function are a bit trickier to deal with than others
# in this module, since the documentation has promised that the header
# argument can be an optional positional argument.
if args and isinstance(args[0], Header):
header = args[0]
args = args[1:]
else:
header = None
# The header can also be a keyword argument--if both are provided the
# keyword takes precedence
header = kwargs.pop("header", header)
new_hdu = _makehdu(data, header)
closed = fileobj_closed(filename)
hdulist, _ext = _getext(filename, "update", *args, **kwargs)
try:
hdulist[_ext] = new_hdu
finally:
hdulist.close(closed=closed)
def info(filename, output=None, **kwargs):
"""
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each HDU.
Parameters
----------
filename : path-like or file-like
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default.
"""
mode, closed = _get_file_mode(filename, default="readonly")
# Set the default value for the ignore_missing_end parameter
if "ignore_missing_end" not in kwargs:
kwargs["ignore_missing_end"] = True
f = fitsopen(filename, mode=mode, **kwargs)
try:
ret = f.info(output=output)
finally:
if closed:
f.close()
return ret
def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
        not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
    The primary use for the `printdiff` function is to allow a quick printout
    of a FITS difference report; it writes to ``sys.stdout``.
    To save the diff report to a file, please use `~astropy.io.fits.FITSDiff`
    directly.
"""
# Pop extension keywords
extension = {
key: kwargs.pop(key) for key in ["ext", "extname", "extver"] if key in kwargs
}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
        # Use the handy _getext to interpret any ext keywords, but
        # we will need to close the files ourselves if this fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
        # Have to close hdulista if opening inputb fails
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
            # See the note at the final print call below
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError(
"Extension specification with HDUList objects not implemented."
)
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1, overwrite=False):
"""
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : path-like or file-like
Input fits file.
datafile : path-like or file-like, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`,
no column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `tabledump` function is to allow editing in a
standard text editor of the table data and parameters. The
`tableload` function can be used to reassemble the table from the
three ASCII files.
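    Examples
    --------
    A minimal sketch; the file names are hypothetical::
        >>> from astropy.io import fits
        >>> fits.tabledump('events.fits', 'events_data.txt', 'events_cols.txt',
        ...                'events_hdr.txt', ext=1)  # doctest: +SKIP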
"""
# allow file object to already be opened in any of the valid modes
# and leave the file in the same state (opened or closed) as when
# the function was called
mode, closed = _get_file_mode(filename, default="readonly")
f = fitsopen(filename, mode=mode)
# Create the default data file name if one was not provided
try:
if not datafile:
root, tail = os.path.splitext(f._file.name)
datafile = root + "_" + repr(ext) + ".txt"
# Dump the data from the HDU to the files
f[ext].dump(datafile, cdfile, hfile, overwrite)
finally:
if closed:
f.close()
if isinstance(tabledump.__doc__, str):
tabledump.__doc__ += BinTableHDU._tdump_file_format.replace("\n", "\n ")
def tableload(datafile, cdfile, hfile=None):
"""
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
    The primary use for the `tableload` function is to allow the input of
    table data and parameters from ASCII files that were edited in a
    standard text editor. The `tabledump` function can be used to create
    the initial ASCII files.
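    Examples
    --------
    A minimal sketch, reassembling files written by `tabledump` (the file
    names are hypothetical)::
        >>> from astropy.io import fits
        >>> hdu = fits.tableload('events_data.txt', 'events_cols.txt',
        ...                      'events_hdr.txt')  # doctest: +SKIP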
"""
return BinTableHDU.load(datafile, cdfile, hfile, replace=True)
if isinstance(tableload.__doc__, str):
tableload.__doc__ += BinTableHDU._tdump_file_format.replace("\n", "\n ")
def _getext(filename, mode, *args, ext=None, extname=None, extver=None, **kwargs):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
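    For example (illustrative)::
        hdulist, ext = _getext('img.fits', 'readonly', 'SCI', 2)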
"""
err_msg = "Redundant/conflicting extension arguments(s): {}".format(
{"args": args, "ext": ext, "extname": extname, "extver": extver}
)
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
        if _is_int(args[0]) or (isinstance(args[0], tuple) and len(args[0]) == 2):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], str):
# The first arg is an extension name; it could still be valid
# to provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it
# below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError("Too many positional arguments.")
if ext is not None and not (
_is_int(ext)
or (
isinstance(ext, tuple)
and len(ext) == 2
and isinstance(ext[0], str)
and _is_int(ext[1])
)
):
raise ValueError(
"The ext keyword must be either an extension number "
"(zero-indexed) or a (extname, extver) tuple."
)
if extname is not None and not isinstance(extname, str):
raise ValueError("The extname argument must be a string.")
if extver is not None and not _is_int(extver):
raise ValueError("The extver argument must be an integer.")
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError("extver alone cannot specify an extension.")
hdulist = fitsopen(filename, mode=mode, **kwargs)
return hdulist, ext
def _makehdu(data, header):
if header is None:
header = Header()
hdu = _BaseHDU._from_data(data, header)
if hdu.__class__ in (_BaseHDU, _ValidHDU):
# The HDU type was unrecognized, possibly due to a
# nonexistent/incomplete header
if (
isinstance(data, np.ndarray) and data.dtype.fields is not None
) or isinstance(data, np.recarray):
hdu = BinTableHDU(data, header=header)
elif isinstance(data, np.ndarray) or _is_dask_array(data):
hdu = ImageHDU(data, header=header)
else:
raise KeyError("Data must be a numpy array.")
return hdu
def _stat_filename_or_fileobj(filename):
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
closed = fileobj_closed(filename)
name = fileobj_name(filename) or ""
try:
loc = filename.tell()
except AttributeError:
loc = 0
noexist_or_empty = (
name and (not os.path.exists(name) or (os.path.getsize(name) == 0))
) or (not name and loc == 0)
return name, closed, noexist_or_empty
def _get_file_mode(filename, default="readonly"):
"""
    Allow file object to already be opened in any of the valid modes
    and leave the file in the same state (opened or closed) as when
the function was called.
"""
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if fmode is not None:
mode = FILE_MODES.get(fmode)
if mode is None:
raise OSError(
"File mode of the input file object ({!r}) cannot be used to "
"read/write FITS files.".format(fmode)
)
return mode, closed
|
61882c397fb08d3c980b92680075815376189d0acef188a29818638cc582f5ba | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import os
import sys
from collections import defaultdict
from glob import glob
import numpy
from setuptools import Extension
from extension_helpers import get_compiler, pkg_config
def _get_compression_extension():
debug = "--debug" in sys.argv
cfg = defaultdict(list)
cfg["include_dirs"].append(numpy.get_include())
cfg["sources"].append(
os.path.join(os.path.dirname(__file__), "src", "compressionmodule.c")
)
if int(os.environ.get("ASTROPY_USE_SYSTEM_CFITSIO", 0)) or int(
os.environ.get("ASTROPY_USE_SYSTEM_ALL", 0)
):
for k, v in pkg_config(["cfitsio"], ["cfitsio"]).items():
cfg[k].extend(v)
else:
if get_compiler() == "msvc":
# These come from the CFITSIO vcc makefile, except the last
# which ensures on windows we do not include unistd.h (in regular
# compilation of cfitsio, an empty file would be generated)
cfg["extra_compile_args"].extend(
[
"/D",
"WIN32",
"/D",
"_WINDOWS",
"/D",
"_MBCS",
"/D",
"_USRDLL",
"/D",
"_CRT_SECURE_NO_DEPRECATE",
"/D",
"YY_NO_UNISTD_H",
]
)
else:
cfg["extra_compile_args"].extend(["-Wno-declaration-after-statement"])
cfg["define_macros"].append(("HAVE_UNISTD_H", None))
if not debug:
# these switches are to silence warnings from compiling CFITSIO
                # For full silencing, some flags are added that are only used in
# later versions of gcc (versions approximate; see #6474)
cfg["extra_compile_args"].extend(
[
"-Wno-strict-prototypes",
"-Wno-unused",
"-Wno-uninitialized",
"-Wno-unused-result", # gcc >~4.8
"-Wno-misleading-indentation", # gcc >~7.2
"-Wno-format-overflow", # gcc >~7.2
]
)
cfitsio_lib_path = os.path.join("cextern", "cfitsio", "lib")
cfitsio_zlib_path = os.path.join("cextern", "cfitsio", "zlib")
cfitsio_files = glob(os.path.join(cfitsio_lib_path, "*.c"))
cfitsio_zlib_files = glob(os.path.join(cfitsio_zlib_path, "*.c"))
cfg["include_dirs"].append(cfitsio_lib_path)
cfg["include_dirs"].append(cfitsio_zlib_path)
cfg["sources"].extend(cfitsio_files)
cfg["sources"].extend(cfitsio_zlib_files)
return Extension("astropy.io.fits.compression", **cfg)
def get_extensions():
return [_get_compression_extension()]
|
c6b4351e6749ba535f90b4f34eb20360cc8a698d45b688ed40f777d9649a6737 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
import itertools
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
import numpy as np
from packaging.version import Version
from astropy.utils import data
from astropy.utils.exceptions import AstropyUserWarning
path_like = (str, bytes, os.PathLike)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = f"_update_{notification}"
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving the listener's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state["_listeners"] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter("__name__")):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
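    Example (an illustrative sketch)::
        @ignore_sigint
        def flush_all(hdulist):
            hdulist.flush()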
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.current_thread()
single_thread = (
threading.active_count() == 1 and curr_thread.name == "MainThread"
)
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn(
"KeyboardInterrupt ignored until {} is complete!".format(
func.__name__
),
AstropyUserWarning,
)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
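    Example::
        >>> list(pairwise([1, 2, 3, 4]))
        [(1, 2), (2, 3), (3, 4)]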
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
# StopIter if b happens to be empty
break
return zip(a, b)
def encode_ascii(s):
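    """Encode a str (or str-dtype ndarray) as ASCII bytes; bytes input
    passes through unchanged, and non-string arrays raise TypeError.
    """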
if isinstance(s, str):
return s.encode("ascii")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.str_):
ns = np.char.encode(s, "ascii").view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
ns = ns.astype((np.bytes_, s.dtype.itemsize / 4))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.bytes_):
raise TypeError("string operation on non-string array")
return s
def decode_ascii(s):
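    """Decode bytes (or a bytes-dtype ndarray) to str, replacing any
    non-ASCII characters with "?"; non-string arrays raise TypeError.
    """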
if isinstance(s, bytes):
try:
return s.decode("ascii")
except UnicodeDecodeError:
warnings.warn(
"non-ASCII characters are present in the FITS "
'file header and have been replaced by "?" '
"characters",
AstropyUserWarning,
)
s = s.decode("ascii", errors="replace")
return s.replace("\ufffd", "?")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.bytes_):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
            # Numpy apparently also has a bug that if a string array is
            # empty calling np.char.decode on it returns an empty float64
            # array, so create the empty decoded array by hand instead
dt = s.dtype.str.replace("S", "U")
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, "ascii").view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.str_):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError("string operation on non-string array")
return s
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, "readable"):
return f.readable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "read"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "r+"):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, "writable"):
return f.writable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "write"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "wa+"):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, "buffer"):
return isfile(f.buffer)
elif hasattr(f, "raw"):
return isfile(f.raw)
return False
def fileobj_name(f):
"""
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, (str, bytes)):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, "name"):
return f.name
elif hasattr(f, "filename"):
return f.filename
elif hasattr(f, "__class__"):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, path_like):
return True
if hasattr(f, "closed"):
return f.closed
elif hasattr(f, "fileobj") and hasattr(f.fileobj, "closed"):
return f.fileobj.closed
elif hasattr(f, "fp") and hasattr(f.fp, "closed"):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, "fileobj") and hasattr(f.fileobj, "mode"):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, "fileobj_mode"):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, "fp") and hasattr(f.fp, "mode"):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, "mode"):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return "rb"
elif mode == gzip.WRITE:
return "wb"
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if "+" in mode:
mode = mode.replace("+", "")
mode += "+"
return mode
def fileobj_is_binary(f):
"""
    Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, "binary"):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return "b" in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split("\n\n")
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return "\n\n".join(maybe_fill(p) for p in paragraphs)
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if sys.platform == "darwin" and Version(platform.mac_ver()[0]) < Version(
"10.9"
):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024**3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
        # copy is needed because the array is read-only, being backed by the
        # immutable bytes object returned by infile.read()
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2**32) - 1
_WIN_WRITE_LIMIT = (2**31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
try:
seekable = outfile.seekable()
except AttributeError:
seekable = False
if isfile(outfile) and seekable:
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (
sys.platform == "darwin"
and arr.nbytes >= _OSX_WRITE_LIMIT + 1
and arr.nbytes % 4096 == 0
):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith("win"):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx : idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
        # it does not have the buffer interface, a TypeError is raised, in
        # which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, "nditer"):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order="C"):
fileobj.write(item.tobytes())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if (sys.byteorder == "little" and byteorder == ">") or (
sys.byteorder == "big" and byteorder == "<"
):
for item in arr.flat:
fileobj.write(item.byteswap().tobytes())
else:
for item in arr.flat:
fileobj.write(item.tobytes())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
    elif not binmode and not isinstance(s, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and both types are not numeric, a view is
returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif array.dtype.itemsize == dtype.itemsize and not (
np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number)
):
# Includes a special case when both dtypes are at least numeric to
# account for old Trac ticket 218 (now inaccessible).
return array.view(dtype)
else:
return array.astype(dtype)
def _pseudo_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
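    For example, for an unsigned 16-bit integer the midpoint is 2**15::
        >>> _pseudo_zero(np.dtype('uint16'))
        32768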
"""
# special case for int8
if dtype.kind == "i" and dtype.itemsize == 1:
return -128
assert dtype.kind == "u"
return 1 << (dtype.itemsize * 8 - 1)
def _is_pseudo_integer(dtype):
return (dtype.kind == "u" and dtype.itemsize >= 2) or (
dtype.kind == "i" and dtype.itemsize == 1
)
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(s, width):
"""
    Split a long string into parts where each part is no longer than ``width``
    and no word is cut into two pieces.  But if there are any single words
    which are longer than ``width``, then they will be split in the middle of
    the word.
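    For example::
        >>> _words_group('the quick brown fox', 10)
        ['the quick ', 'brown fox']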
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode("utf8") + b" ", dtype="S1")
# locations of the blanks
blank_loc = np.nonzero(arr == b" ")[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
directory of the input file as the base name of the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
    If the array has an mmap.mmap at the base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, "base") and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
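    """Context manager that, if an OSError escapes the wrapped block, checks
    whether the target directory has enough free space for the given HDUs
    and, if not, re-raises the error with a more informative message; the
    HDUs are closed before re-raising.
    """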
try:
yield
except OSError as exc:
error_message = ""
        # Determine the directory before wrapping a single HDU in a plain
        # list, since a plain list has no _file attribute.
        if dirname is None:
            dirname = os.path.dirname(hdulist._file.name)
        if not isinstance(hdulist, list):
            hdulist = [hdulist]
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = (
"Not enough space on disk: requested {}, available {}. ".format(
hdulist_size, free_space
)
)
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
# The _str_to_num method converts the value to string/float
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(f"io/fits/tests/data/{filename}", "astropy")
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
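    For example::
        >>> a = np.array(['ab  ', 'c   '], dtype='S4')
        >>> _rstrip_inplace(a)
        array([b'ab', b'c'], dtype='|S4')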
"""
    # The following implementation converts the strings to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in "SU":
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == "S" else 4
dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}"
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j : j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
def _is_dask_array(data):
"""Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down.
"""
if not hasattr(data, "compute"):
return False
try:
from dask.array import Array
except ImportError:
# If we cannot import dask, surely this cannot be a
# dask array!
return False
else:
return isinstance(data, Array)
|
7d77010fc8f19bea4e1bdda4437338f14afbf39d18972375390d6881ceccd467 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import re
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from . import conf
from .util import _is_int, _str_to_num, _words_group, translate
from .verify import VerifyError, VerifyWarning, _ErrList, _Verify
__all__ = ["Card", "Undefined"]
FIX_FP_TABLE = str.maketrans("de", "DE")
FIX_FP_TABLE2 = str.maketrans("dD", "eE")
CARD_LENGTH = 80
BLANK_CARD = " " * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = "= " # The standard FITS value indicator
VALUE_INDICATOR_LEN = len(VALUE_INDICATOR)
HIERARCH_VALUE_INDICATOR = "=" # HIERARCH cards may use a shortened indicator
class Undefined:
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r"^[A-Z0-9_-]{0,%d}$" % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r"^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$", re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r"(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?"
_digits_NFSC = r"(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?"
_numr_FSC = r"[+-]?" + _digits_FSC
_numr_NFSC = r"[+-]? *" + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(rf"(?P<sign>[+-])?0*?(?P<digt>{_digits_FSC})")
_number_NFSC_RE = re.compile(rf"(?P<sign>[+-])? *0*?(?P<digt>{_digits_NFSC})")
# Used in cards using the CONTINUE convention which expect a string
# followed by an optional comment
_strg = r"\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )"
_comm_field = r"(?P<comm_field>(?P<sepr>/ *)(?P<comm>(.|\n)*))"
_strg_comment_RE = re.compile(f"({_strg})? *{_comm_field}?")
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r"[ -~]*\Z")
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
    # comment may be an empty string.
# fmt: off
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$'
)
# fmt: on
# fmt: off
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
fr')? *){_comm_field}?$'
)
# fmt: on
_rvkc_identifier = r"[a-zA-Z_]\w*"
_rvkc_field = _rvkc_identifier + r"(\.\d+)?"
_rvkc_field_specifier_s = rf"{_rvkc_field}(\.{_rvkc_field})*"
_rvkc_field_specifier_val = r"(?P<keyword>{}): +(?P<val>{})".format(
_rvkc_field_specifier_s, _numr_FSC
)
_rvkc_keyword_val = rf"\'(?P<rawval>{_rvkc_field_specifier_val})\'"
_rvkc_keyword_val_comm = rf" *{_rvkc_keyword_val} *(/ *(?P<comm>[ -~]*))?$"
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + "$")
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = re.compile(
r"(?P<keyword>{})\.(?P<field_specifier>{})$".format(
_rvkc_identifier, _rvkc_field_specifier_s
)
)
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
_commentary_keywords = {"", "COMMENT", "HISTORY", "END"}
_special_keywords = _commentary_keywords.union(["CONTINUE"])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and "key" in kwargs:
keyword = kwargs["key"]
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
        # If the card could not be parsed according to the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (
keyword is not None
and value is not None
and self._check_if_rvkc(keyword, value)
):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ""
return ""
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError("Once set, the Card keyword may not be modified")
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(
keyword_upper
):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == "END":
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == "HIERARCH ":
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
"Keyword name {!r} is greater than 8 characters or "
"contains characters not allowed by the FITS "
"standard; a HIERARCH card will be created.".format(keyword),
VerifyWarning,
)
else:
raise ValueError(f"Illegal keyword name: {keyword!r}.")
self._keyword = keyword
self._modified = True
else:
raise ValueError(f"Keyword name {keyword!r} is not a string.")
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == "":
self._value = value = ""
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(
value,
(
str,
int,
float,
complex,
bool,
Undefined,
np.floating,
np.integer,
np.complexfloating,
np.bool_,
),
):
raise ValueError(f"Illegal value: {value!r}.")
if isinstance(value, (float, np.float32)) and (
np.isnan(value) or np.isinf(value)
):
# value is checked for both float and np.float32 instances
# since np.float32 is not considered a Python float.
raise ValueError(
"Floating point {!r} values are not allowed in FITS headers.".format(
value
)
)
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
"FITS header values must contain standard printable ASCII "
"characters; {!r} contains characters not representable in "
"ASCII or non-printable characters.".format(value)
)
elif isinstance(value, np.bool_):
value = bool(value)
if conf.strip_header_whitespace and (
isinstance(oldvalue, str) and isinstance(value, str)
):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = oldvalue != value or not isinstance(value, type(oldvalue))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError(f"value {self._value} is not a float")
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
if not self.field_specifier:
self.value = ""
else:
raise AttributeError(
"Values cannot be deleted from record-valued keyword cards"
)
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split(".", 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = f"{self.field_specifier}: {self.value}"
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is not None:
return self._comment
elif self._image:
self._comment = self._parse_comment()
return self._comment
else:
self._comment = ""
return ""
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if comment is None:
comment = ""
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
"FITS header comments must contain standard printable "
"ASCII characters; {!r} contains characters not "
"representable in ASCII or non-printable characters.".format(
comment
)
)
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ""
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
self.comment = ""
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
        # Ensure that the keyword exists and has been parsed--this will set the
# internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
@field_specifier.setter
def field_specifier(self, field_specifier):
if not field_specifier:
raise ValueError(
"The field-specifier may not be blank in record-valued keyword cards."
)
elif not self.field_specifier:
raise AttributeError(
"Cannot coerce cards to be record-valued "
"keyword cards by setting the "
"field_specifier attribute"
)
elif field_specifier != self.field_specifier:
self._field_specifier = field_specifier
# The keyword need also be updated
keyword = self._keyword.split(".", 1)[0]
self._keyword = ".".join([keyword, field_specifier])
self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError(
"The field_specifier attribute may not be "
"deleted from record-valued keyword cards."
)
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify("fix+warn")
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (
not self.keyword
and (isinstance(self.value, str) and not self.value)
and not self.comment
)
@classmethod
def fromstring(cls, image):
"""
Construct a `Card` object from a (raw) string. It will pad the string
if it is not the length of a card image (80 columns). If the card
image is longer than 80 columns, assume it contains ``CONTINUE``
card(s).
"""
card = cls()
if isinstance(image, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place
image = image.decode("latin1")
card._image = _pad(image)
card._verified = False
return card
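    # Illustrative usage of ``fromstring`` (a minimal sketch; the card text
    # below is a hypothetical example, not taken from a real header):
    #
    #     >>> card = Card.fromstring("EXPTIME =                 30.0 / exposure time")
    #     >>> card.keyword, card.value, card.comment
    #     ('EXPTIME', 30.0, 'exposure time')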
@classmethod
def normalize_keyword(cls, keyword):
"""
        `classmethod` to convert a keyword value that may contain a
        field-specifier to uppercase. The keyword proper is converted to
        uppercase, while the field-specifier keeps its original case.
Parameters
----------
        keyword : str
A keyword value or a ``keyword.field-specifier`` value
"""
# Test first for the most common case: a standard FITS keyword provided
# in standard all-caps
if len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword):
return keyword
# Test if this is a record-valued keyword
match = cls._rvkc_keyword_name_RE.match(keyword)
if match:
return ".".join(
(match.group("keyword").strip().upper(), match.group("field_specifier"))
)
elif len(keyword) > 9 and keyword[:9].upper() == "HIERARCH ":
# Remove 'HIERARCH' from HIERARCH keywords; this could lead to
# ambiguity if there is actually a keyword card containing
# "HIERARCH HIERARCH", but shame on you if you do that.
return keyword[9:].strip().upper()
else:
# A normal FITS keyword, but provided in non-standard case
return keyword.strip().upper()
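    # Illustrative behavior of ``normalize_keyword`` (the inputs below are
    # hypothetical):
    #
    #     >>> Card.normalize_keyword("naxis")             # plain keyword
    #     'NAXIS'
    #     >>> Card.normalize_keyword("HIERARCH Foo Bar")  # prefix dropped
    #     'FOO BAR'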
def _check_if_rvkc(self, *args):
"""
Determine whether or not the card is a record-valued keyword card.
If one argument is given, that argument is treated as a full card image
and parsed as such. If two arguments are given, the first is treated
as the card keyword (including the field-specifier if the card is
        intended as a RVKC), and the second as the card value; alternatively,
        the first argument can be the base keyword and the second the
        'field-specifier: value' string.
If the check passes the ._keyword, ._value, and .field_specifier
keywords are set.
Examples
--------
::
self._check_if_rvkc('DP1', 'AXIS.1: 2')
self._check_if_rvkc('DP1.AXIS.1', 2)
self._check_if_rvkc('DP1 = AXIS.1: 2')
"""
if not conf.enable_record_valued_keyword_cards:
return False
if len(args) == 1:
return self._check_if_rvkc_image(*args)
elif len(args) == 2:
keyword, value = args
if not isinstance(keyword, str):
return False
if keyword in self._commentary_keywords:
return False
match = self._rvkc_keyword_name_RE.match(keyword)
if match and isinstance(value, (int, float)):
self._init_rvkc(
match.group("keyword"), match.group("field_specifier"), None, value
)
return True
# Testing for ': ' is a quick way to avoid running the full regular
# expression, speeding this up for the majority of cases
if isinstance(value, str) and value.find(": ") > 0:
match = self._rvkc_field_specifier_val_RE.match(value)
if match and self._keywd_FSC_RE.match(keyword):
self._init_rvkc(
keyword, match.group("keyword"), value, match.group("val")
)
return True
def _check_if_rvkc_image(self, *args):
"""
Implements `Card._check_if_rvkc` for the case of an unparsed card
image. If given one argument this is the full intact image. If given
two arguments the card has already been split between keyword and
value+comment at the standard value indicator '= '.
"""
if len(args) == 1:
image = args[0]
eq_idx = image.find(VALUE_INDICATOR)
if eq_idx < 0 or eq_idx > 9:
return False
keyword = image[:eq_idx]
rest = image[eq_idx + VALUE_INDICATOR_LEN :]
else:
keyword, rest = args
rest = rest.lstrip()
# This test allows us to skip running the full regular expression for
# the majority of cards that do not contain strings or that definitely
# do not contain RVKC field-specifiers; it's very much a
# micro-optimization but it does make a measurable difference
if not rest or rest[0] != "'" or rest.find(": ") < 2:
return False
match = self._rvkc_keyword_val_comm_RE.match(rest)
if match:
self._init_rvkc(
keyword,
match.group("keyword"),
match.group("rawval"),
match.group("val"),
)
return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = ".".join((keyword_upper, field_specifier))
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
def _parse_keyword(self):
keyword = self._image[:KEYWORD_LENGTH].strip()
keyword_upper = keyword.upper()
if keyword_upper in self._special_keywords:
return keyword_upper
elif (
keyword_upper == "HIERARCH"
and self._image[8] == " "
and HIERARCH_VALUE_INDICATOR in self._image
):
# This is valid HIERARCH card as described by the HIERARCH keyword
# convention:
# http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
return keyword.strip()
else:
val_ind_idx = self._image.find(VALUE_INDICATOR)
if 0 <= val_ind_idx <= KEYWORD_LENGTH:
# The value indicator should appear in byte 8, but we are
# flexible and allow this to be fixed
if val_ind_idx < KEYWORD_LENGTH:
keyword = keyword[:val_ind_idx]
keyword_upper = keyword_upper[:val_ind_idx]
rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN :]
# So far this looks like a standard FITS keyword; check whether
# the value represents a RVKC; if so then we pass things off to
# the RVKC parser
if self._check_if_rvkc_image(keyword, rest):
return self._keyword
return keyword_upper
else:
warnings.warn(
"The following header keyword is invalid or follows an "
"unrecognized non-standard convention:\n{}".format(self._image),
AstropyUserWarning,
)
self._invalid = True
return keyword
def _parse_value(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# Likewise for invalid cards
if self.keyword.upper() in self._commentary_keywords or self._invalid:
return self._image[KEYWORD_LENGTH:].rstrip()
if self._check_if_rvkc(self._image):
return self._value
m = self._value_NFSC_RE.match(self._split()[1])
if m is None:
raise VerifyError(
"Unparsable card ({}), fix it first with .verify('fix').".format(
self.keyword
)
)
if m.group("bool") is not None:
value = m.group("bool") == "T"
elif m.group("strg") is not None:
value = re.sub("''", "'", m.group("strg"))
elif m.group("numr") is not None:
# Check for numbers with leading 0s.
numr = self._number_NFSC_RE.match(m.group("numr"))
digt = translate(numr.group("digt"), FIX_FP_TABLE2, " ")
if numr.group("sign") is None:
sign = ""
else:
sign = numr.group("sign")
value = _str_to_num(sign + digt)
elif m.group("cplx") is not None:
# Check for numbers with leading 0s.
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE2, " ")
if real.group("sign") is None:
rsign = ""
else:
rsign = real.group("sign")
value = _str_to_num(rsign + rdigt)
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE2, " ")
if imag.group("sign") is None:
isign = ""
else:
isign = imag.group("sign")
value += _str_to_num(isign + idigt) * 1j
else:
value = UNDEFINED
if not self._valuestring:
self._valuestring = m.group("valu")
return value
def _parse_comment(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# likewise for invalid/unparsable cards
if self.keyword in Card._commentary_keywords or self._invalid:
return ""
valuecomment = self._split()[1]
m = self._value_NFSC_RE.match(valuecomment)
comment = ""
if m is not None:
# Don't combine this if statement with the one above, because
# we only want the elif case to run if this was not a valid
# card at all
if m.group("comm"):
comment = m.group("comm").rstrip()
elif "/" in valuecomment:
# The value in this FITS file was not in a valid/known format. In
# this case the best we can do is guess that everything after the
# first / was meant to be the comment
comment = valuecomment.split("/", 1)[1].strip()
return comment
def _split(self):
"""
Split the card image between the keyword and the rest of the card.
"""
if self._image is not None:
# If we already have a card image, don't try to rebuild a new card
# image, which self.image would do
image = self._image
else:
image = self.image
# Split cards with CONTINUE cards or commentary keywords with long
# values
if len(self._image) > self.length:
values = []
comments = []
keyword = None
for card in self._itersubcards():
kw, vc = card._split()
if keyword is None:
keyword = kw
if keyword in self._commentary_keywords:
values.append(vc)
continue
# Should match a string followed by a comment; if not it
# might be an invalid Card, so we just take it verbatim
m = self._strg_comment_RE.match(vc)
if not m:
return kw, vc
value = m.group("strg") or ""
value = value.rstrip().replace("''", "'")
if value and value[-1] == "&":
value = value[:-1]
values.append(value)
comment = m.group("comm")
if comment:
comments.append(comment.rstrip())
if keyword in self._commentary_keywords:
valuecomment = "".join(values)
else:
# CONTINUE card
valuecomment = f"'{''.join(values)}' / {' '.join(comments)}"
return keyword, valuecomment
if self.keyword in self._special_keywords:
keyword, valuecomment = image.split(" ", 1)
else:
try:
delim_index = image.index(self._value_indicator)
except ValueError:
delim_index = None
# The equal sign may not be any higher than column 10; anything
# past that must be considered part of the card value
if delim_index is None:
keyword = image[:KEYWORD_LENGTH]
valuecomment = image[KEYWORD_LENGTH:]
elif delim_index > 10 and image[:9] != "HIERARCH ":
keyword = image[:8]
valuecomment = image[8:]
else:
keyword, valuecomment = image.split(self._value_indicator, 1)
return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split(".", 1)
self._keyword = ".".join([keyword.upper(), field_specifier])
else:
self._keyword = self._keyword.upper()
self._modified = True
def _fix_value(self):
"""Fix the card image for fixable non-standard compliance."""
value = None
keyword, valuecomment = self._split()
m = self._value_NFSC_RE.match(valuecomment)
# for the unparsable case
if m is None:
try:
value, comment = valuecomment.split("/", 1)
self.value = value.strip()
self.comment = comment.strip()
except (ValueError, IndexError):
self.value = valuecomment
self._valuestring = self._value
return
elif m.group("numr") is not None:
numr = self._number_NFSC_RE.match(m.group("numr"))
value = translate(numr.group("digt"), FIX_FP_TABLE, " ")
if numr.group("sign") is not None:
value = numr.group("sign") + value
elif m.group("cplx") is not None:
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE, " ")
if real.group("sign") is not None:
rdigt = real.group("sign") + rdigt
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE, " ")
if imag.group("sign") is not None:
idigt = imag.group("sign") + idigt
value = f"({rdigt}, {idigt})"
self._valuestring = value
# The value itself has not been modified, but its serialized
# representation (as stored in self._valuestring) has been changed, so
# still set this card as having been modified (see ticket #137)
self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
return "{:{len}}".format(
self.keyword.split(".", 1)[0], len=KEYWORD_LENGTH
)
elif self._hierarch:
return f"HIERARCH {self.keyword} "
else:
return "{:{len}}".format(self.keyword, len=KEYWORD_LENGTH)
else:
return " " * KEYWORD_LENGTH
def _format_value(self):
# value string
float_types = (float, np.floating, complex, np.complexfloating)
# Force the value to be parsed out first
value = self.value
# But work with the underlying raw value instead (to preserve
# whitespace, for now...)
value = self._value
if self.keyword in self._commentary_keywords:
# The value of a commentary card must be just a raw unprocessed
# string
value = str(value)
elif (
self._valuestring
and not self._valuemodified
and isinstance(self.value, float_types)
):
# Keep the existing formatting for float/complex numbers
value = f"{self._valuestring:>20}"
elif self.field_specifier:
value = _format_value(self._value).strip()
value = f"'{self.field_specifier}: {value}'"
else:
value = _format_value(value)
# For HIERARCH cards the value should be shortened to conserve space
if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
value = value.strip()
return value
def _format_comment(self):
if not self.comment:
return ""
else:
return f" / {self._comment}"
def _format_image(self):
keyword = self._format_keyword()
value = self._format_value()
is_commentary = keyword.strip() in self._commentary_keywords
if is_commentary:
comment = ""
else:
comment = self._format_comment()
# equal sign string
# by default use the standard value indicator even for HIERARCH cards;
# later we may abbreviate it if necessary
delimiter = VALUE_INDICATOR
if is_commentary:
delimiter = ""
# put all parts together
output = "".join([keyword, delimiter, value, comment])
# For HIERARCH cards we can save a bit of space if necessary by
# removing the space between the keyword and the equals sign; I'm
        # guessing this is part of the HIERARCH card specification
keywordvalue_length = len(keyword) + len(delimiter) + len(value)
if keywordvalue_length > self.length and keyword.startswith("HIERARCH"):
if keywordvalue_length == self.length + 1 and keyword[-1] == " ":
output = "".join([keyword[:-1], delimiter, value, comment])
else:
# I guess the HIERARCH card spec is incompatible with CONTINUE
# cards
raise ValueError(
"The header keyword {!r} with its value is too long".format(
self.keyword
)
)
if len(output) <= self.length:
output = f"{output:80}"
else:
# longstring case (CONTINUE card)
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if isinstance(self.value, str) and len(value) > (self.length - 10):
output = self._format_long_image()
else:
warnings.warn(
"Card is too long, comment will be truncated.", VerifyWarning
)
output = output[: Card.length]
return output
def _format_long_image(self):
"""
Break up long string value/comment into ``CONTINUE`` cards.
This is a primitive implementation: it will put the value
string in one block and the comment string in another. Also,
it does not break at the blank space between words. So it may
not look pretty.
"""
if self.keyword in Card._commentary_keywords:
return self._format_long_commentary_image()
value_length = 67
comment_length = 64
output = []
# do the value string
value = self._value.replace("'", "''")
words = _words_group(value, value_length)
for idx, word in enumerate(words):
if idx == 0:
headstr = "{:{len}}= ".format(self.keyword, len=KEYWORD_LENGTH)
else:
headstr = "CONTINUE "
# If this is the final CONTINUE remove the '&'
if not self.comment and idx == len(words) - 1:
value_format = "'{}'"
else:
value_format = "'{}&'"
value = value_format.format(word)
output.append(f"{headstr + value:80}")
# do the comment string
comment_format = "{}"
if self.comment:
words = _words_group(self.comment, comment_length)
for idx, word in enumerate(words):
# If this is the final CONTINUE remove the '&'
if idx == len(words) - 1:
headstr = "CONTINUE '' / "
else:
headstr = "CONTINUE '&' / "
comment = headstr + comment_format.format(word)
output.append(f"{comment:80}")
return "".join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
        will render the card as multiple consecutive commentary cards of the
same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx : idx + maxlen])))
idx += maxlen
return "".join(output)
def _verify(self, option="warn"):
errs = []
fix_text = f"Fixed {self.keyword!r} card to meet the FITS standard."
# Don't try to verify cards that already don't meet any recognizable
# standard
if self._invalid:
return _ErrList(errs)
# verify the equal sign position
if self.keyword not in self._commentary_keywords and (
self._image
and self._image[:9].upper() != "HIERARCH "
and self._image.find("=") != 8
):
errs.append(
dict(
err_text=(
"Card {!r} is not FITS standard (equal sign not "
"at column 8).".format(self.keyword)
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the key, it is never fixable
# always fix silently the case where "=" is before column 9,
# since there is no way to communicate back to the _keys.
if (self._image and self._image[:8].upper() == "HIERARCH") or self._hierarch:
pass
else:
if self._image:
# PyFITS will auto-uppercase any standard keyword, so lowercase
# keywords can only occur if they came from the wild
keyword = self._split()[0]
if keyword != keyword.upper():
# Keyword should be uppercase unless it's a HIERARCH card
errs.append(
dict(
err_text=f"Card keyword {keyword!r} is not upper case.",
fix_text=fix_text,
fix=self._fix_keyword,
)
)
keyword = self.keyword
if self.field_specifier:
keyword = keyword.split(".", 1)[0]
if not self._keywd_FSC_RE.match(keyword):
errs.append(
dict(err_text=f"Illegal keyword name {keyword!r}", fixable=False)
)
# verify the value, it may be fixable
keyword, valuecomment = self._split()
if self.keyword in self._commentary_keywords:
# For commentary keywords all that needs to be ensured is that it
# contains only printable ASCII characters
if not self._ascii_text_re.match(valuecomment):
errs.append(
dict(
err_text=(
"Unprintable string {!r}; commentary cards may "
"only contain printable ASCII characters".format(
valuecomment
)
),
fixable=False,
)
)
else:
if not self._valuemodified:
m = self._value_FSC_RE.match(valuecomment)
# If the value of a card was replaced before the card was ever
# even verified, the new value can be considered valid, so we
# don't bother verifying the old value. See
# https://github.com/astropy/astropy/issues/5408
if m is None:
errs.append(
dict(
err_text=(
f"Card {self.keyword!r} is not FITS standard "
f"(invalid value string: {valuecomment!r})."
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the comment (string), it is never fixable
m = self._value_NFSC_RE.match(valuecomment)
if m is not None:
comment = m.group("comm")
if comment is not None:
if not self._ascii_text_re.match(comment):
errs.append(
dict(
err_text=(
f"Unprintable string {comment!r}; header "
"comments may only contain printable "
"ASCII characters"
),
fixable=False,
)
)
errs = _ErrList([self.run_option(option, **err) for err in errs])
self._verified = True
return errs
def _itersubcards(self):
"""
If the card image is greater than 80 characters, it should consist of a
        normal card followed by one or more CONTINUE cards. This method returns
the subcards that make up this logical card.
This can also support the case where a HISTORY or COMMENT card has a
long value that is stored internally as multiple concatenated card
images.
"""
ncards = len(self._image) // Card.length
for idx in range(0, Card.length * ncards, Card.length):
card = Card.fromstring(self._image[idx : idx + Card.length])
if idx > 0 and card.keyword.upper() not in self._special_keywords:
raise VerifyError(
"Long card images must have CONTINUE cards after "
"the first card or have commentary keywords like "
"HISTORY or COMMENT."
)
if not isinstance(card.value, str):
raise VerifyError("CONTINUE cards must have string values.")
yield card
def _int_or_float(s):
"""
    Convert a string to an int if possible, otherwise to a float.
    If the string can be converted to neither, a `ValueError` is raised.
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
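# Illustrative behavior of ``_int_or_float`` (inputs are hypothetical):
#
#     >>> _int_or_float("42")
#     42
#     >>> _int_or_float("4.2E1")   # not an int, so falls back to float
#     42.0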
def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
    # string value should occupy at least 8 columns, unless it is
# a null string
if isinstance(value, str):
if value == "":
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = f"'{exp_val_str:8}'"
return f"{val_str:20}"
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return f"{repr(value)[0]:>20}" # T or F
elif _is_int(value):
return f"{value:>20d}"
elif isinstance(value, (float, np.floating)):
return f"{_format_float(value):>20}"
elif isinstance(value, (complex, np.complexfloating)):
val_str = f"({_format_float(value.real)}, {_format_float(value.imag)})"
return f"{val_str:>20}"
elif isinstance(value, Undefined):
return ""
else:
return ""
def _format_float(value):
"""Format a floating number to make sure it gets the decimal point."""
value_str = f"{value:.16G}"
if "." not in value_str and "E" not in value_str:
value_str += ".0"
elif "E" in value_str:
# On some Windows builds of Python (and possibly other platforms?) the
# exponent is zero-padded out to, it seems, three digits. Normalize
# the format to pad only to two digits.
significand, exponent = value_str.split("E")
if exponent[0] in ("+", "-"):
sign = exponent[0]
exponent = exponent[1:]
else:
sign = ""
value_str = f"{significand}E{sign}{int(exponent):02d}"
# Limit the value string to at most 20 characters.
str_len = len(value_str)
if str_len > 20:
idx = value_str.find("E")
if idx < 0:
value_str = value_str[:20]
else:
value_str = value_str[: 20 - (str_len - idx)] + value_str[idx:]
return value_str
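# Illustrative behavior of ``_format_float`` (inputs are hypothetical):
#
#     >>> _format_float(1.0)       # decimal point is forced
#     '1.0'
#     >>> _format_float(3.0e-05)   # exponent normalized to two digits
#     '3E-05'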
def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + " " * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + " " * (Card.length - strlen)
|
975bb92177daec43fee3424604551101d8f33099bf17d62aa5480d6e22001068 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import operator
import warnings
from astropy.utils import indent
from astropy.utils.exceptions import AstropyUserWarning
class VerifyError(Exception):
"""
Verify exception class.
"""
class VerifyWarning(AstropyUserWarning):
"""
Verify warning class.
"""
VERIFY_OPTIONS = [
"ignore",
"warn",
"exception",
"fix",
"silentfix",
"fix+ignore",
"fix+warn",
"fix+exception",
"silentfix+ignore",
"silentfix+warn",
"silentfix+exception",
]
class _Verify:
"""
Shared methods for verification.
"""
def run_option(
self, option="warn", err_text="", fix_text="Fixed.", fix=None, fixable=True
):
"""
Execute the verification with selected option.
"""
text = err_text
if option in ["warn", "exception"]:
fixable = False
# fix the value
elif not fixable:
text = f"Unfixable error: {text}"
else:
if fix:
fix()
text += " " + fix_text
return (fixable, text)
def verify(self, option="warn"):
"""
Verify all values in the instance.
Parameters
----------
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
"""
opt = option.lower()
if opt not in VERIFY_OPTIONS:
raise ValueError(f"Option {option!r} not recognized.")
if opt == "ignore":
return
errs = self._verify(opt)
# Break the verify option into separate options related to reporting of
# errors, and fixing of fixable errors
if "+" in opt:
fix_opt, report_opt = opt.split("+")
elif opt in ["fix", "silentfix"]:
# The original default behavior for 'fix' and 'silentfix' was to
# raise an exception for unfixable errors
fix_opt, report_opt = opt, "exception"
else:
fix_opt, report_opt = None, opt
if fix_opt == "silentfix" and report_opt == "ignore":
# Fixable errors were fixed, but don't report anything
return
if fix_opt == "silentfix":
# Don't print out fixable issues; the first element of each verify
# item is a boolean indicating whether or not the issue was fixable
line_filter = lambda x: not x[0]
elif fix_opt == "fix" and report_opt == "ignore":
# Don't print *unfixable* issues, but do print fixed issues; this
# is probably not very useful but the option exists for
# completeness
line_filter = operator.itemgetter(0)
else:
line_filter = None
unfixable = False
messages = []
for fixable, message in errs.iter_lines(filter=line_filter):
if fixable is not None:
unfixable = not fixable
messages.append(message)
if messages:
messages.insert(0, "Verification reported errors:")
messages.append("Note: astropy.io.fits uses zero-based indexing.\n")
if fix_opt == "silentfix" and not unfixable:
return
elif report_opt == "warn" or (fix_opt == "fix" and not unfixable):
for line in messages:
warnings.warn(line, VerifyWarning)
else:
raise VerifyError("\n" + "\n".join(messages))
class _ErrList(list):
"""
Verification errors list class. It has a nested list structure
constructed by error messages generated by verifications at
different class levels.
"""
def __init__(self, val=(), unit="Element"):
super().__init__(val)
self.unit = unit
def __str__(self):
return "\n".join(item[1] for item in self.iter_lines())
def iter_lines(self, filter=None, shift=0):
"""
Iterate the nested structure as a list of strings with appropriate
indentations for each level of structure.
"""
element = 0
# go through the list twice, first time print out all top level
# messages
for item in self:
if not isinstance(item, _ErrList):
if filter is None or filter(item):
yield item[0], indent(item[1], shift=shift)
        # second time go through the next-level items; each next-level list
        # must be visited, even if it has nothing to report.
for item in self:
if isinstance(item, _ErrList):
next_lines = item.iter_lines(filter=filter, shift=shift + 1)
try:
first_line = next(next_lines)
except StopIteration:
first_line = None
if first_line is not None:
if self.unit:
# This line is sort of a header for the next level in
# the hierarchy
yield None, indent(f"{self.unit} {element}:", shift=shift)
yield first_line
yield from next_lines
element += 1
|
009321e017d760acb663ef6c0bc33eb658e95c74cba305abb3236a86988206bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
cds.py:
Classes to read CDS / Vizier table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import fnmatch
import itertools
import os
import re
from contextlib import suppress
from astropy.units import Unit
from . import core, fixedwidth
__doctest_skip__ = ["*"]
class CdsHeader(core.BaseHeader):
_subfmt = "CDS"
col_type_map = {
"e": core.FloatType,
"f": core.FloatType,
"i": core.IntType,
"a": core.StrType,
}
"The ReadMe file to construct header from."
readme = None
def get_type_map_key(self, col):
match = re.match(r"\d*(\S)", col.raw_type.lower())
if not match:
raise ValueError(
f'Unrecognized {self._subfmt} format "{col.raw_type}" for column'
f'"{col.name}"'
)
return match.group(1)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a CDS/MRT
header.
Parameters
----------
lines : list
List of table lines
"""
# Read header block for the table ``self.data.table_name`` from the read
# me file ``self.readme``.
if self.readme and self.data.table_name:
in_header = False
readme_inputter = core.BaseInputter()
f = readme_inputter.get_lines(self.readme)
# Header info is not in data lines but in a separate file.
lines = []
comment_lines = 0
for line in f:
line = line.strip()
if in_header:
lines.append(line)
if line.startswith(("------", "=======")):
comment_lines += 1
if comment_lines == 3:
break
else:
match = re.match(
r"Byte-by-byte Description of file: (?P<name>.+)$",
line,
re.IGNORECASE,
)
if match:
                        # Split 'name' in case it contains multiple files
names = [s for s in re.split("[, ]+", match.group("name")) if s]
# Iterate on names to find if one matches the tablename
# including wildcards.
for pattern in names:
if fnmatch.fnmatch(self.data.table_name, pattern):
in_header = True
lines.append(line)
break
else:
raise core.InconsistentTableError(
f"Can't find table {self.data.table_name} in {self.readme}"
)
found_line = False
for i_col_def, line in enumerate(lines):
if re.match(r"Byte-by-byte Description", line, re.IGNORECASE):
found_line = True
elif found_line: # First line after list of file descriptions
i_col_def -= 1 # Set i_col_def to last description line
break
else:
raise ValueError('no line with "Byte-by-byte Description" found')
re_col_def = re.compile(
r"""\s*
(?P<start> \d+ \s* -)? \s*
(?P<end> \d+) \s+
(?P<format> [\w.]+) \s+
(?P<units> \S+) \s+
(?P<name> \S+)
(\s+ (?P<descr> \S.*))?""",
re.VERBOSE,
)
cols = []
for line in itertools.islice(lines, i_col_def + 4, None):
if line.startswith(("------", "=======")):
break
match = re_col_def.match(line)
if match:
col = core.Column(name=match.group("name"))
col.start = int(
re.sub(r'[-\s]', '', match.group('start') or match.group('end'))) - 1 # fmt: skip
col.end = int(match.group("end"))
unit = match.group("units")
if unit == "---":
col.unit = None # "---" is the marker for no unit in CDS/MRT table
else:
col.unit = Unit(unit, format="cds", parse_strict="warn")
col.description = (match.group("descr") or "").strip()
col.raw_type = match.group("format")
col.type = self.get_col_type(col)
match = re.match(
# Matches limits specifier (eg []) that may or may not be
# present
r"(?P<limits>[\[\]] \S* [\[\]])?"
# Matches '?' directly
r"\?"
# Matches to nullval if and only if '=' is present
r"((?P<equal>=)(?P<nullval> \S*))?"
# Matches to order specifier: ('+', '-', '+=', '-=')
r"(?P<order>[-+]?[=]?)"
                    # Matches description text even if no whitespace is
# present after '?'
r"(\s* (?P<descriptiontext> \S.*))?",
col.description,
re.VERBOSE,
)
if match:
col.description = (match.group("descriptiontext") or "").strip()
if issubclass(col.type, core.FloatType):
fillval = "nan"
else:
fillval = "0"
if match.group("nullval") == "-":
col.null = "---"
# CDS/MRT tables can use -, --, ---, or ---- to mark missing values
# see https://github.com/astropy/astropy/issues/1335
for i in [1, 2, 3, 4]:
self.data.fill_values.append(("-" * i, fillval, col.name))
else:
col.null = match.group("nullval")
if col.null is None:
col.null = ""
self.data.fill_values.append((col.null, fillval, col.name))
cols.append(col)
else: # could be a continuation of the previous col's description
if cols:
cols[-1].description += line.strip()
else:
raise ValueError(f'Line "{line}" not parsable as CDS header')
self.names = [x.name for x in cols]
self.cols = cols
class CdsData(core.BaseData):
"""CDS table data reader"""
_subfmt = "CDS"
splitter_class = fixedwidth.FixedWidthSplitter
def process_lines(self, lines):
"""Skip over CDS/MRT header by finding the last section delimiter"""
# If the header has a ReadMe and data has a filename
# then no need to skip, as the data lines do not have header
# info. The ``read`` method adds the table_name to the ``data``
# attribute.
if self.header.readme and self.table_name:
return lines
i_sections = [
i for i, x in enumerate(lines) if x.startswith(("------", "======="))
]
if not i_sections:
raise core.InconsistentTableError(
f"No {self._subfmt} section delimiter found"
)
return lines[i_sections[-1] + 1 :]
class Cds(core.BaseReader):
"""CDS format table.
See: http://vizier.u-strasbg.fr/doc/catstd.htx
Example::
Table: Table name here
= ==============================================================================
Catalog reference paper
Bibliography info here
================================================================================
ADC_Keywords: Keyword ; Another keyword ; etc
Description:
Catalog description here.
================================================================================
Byte-by-byte Description of file: datafile3.txt
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 3 I3 --- Index Running identification number
5- 6 I2 h RAh Hour of Right Ascension (J2000)
8- 9 I2 min RAm Minute of Right Ascension (J2000)
11- 15 F5.2 s RAs Second of Right Ascension (J2000)
--------------------------------------------------------------------------------
Note (1): A CDS file can contain sections with various metadata.
Notes can be multiple lines.
Note (2): Another note.
--------------------------------------------------------------------------------
1 03 28 39.09
2 04 18 24.11
**About parsing the CDS format**
The CDS format consists of a table description and the table data. These
can be in separate files as a ``ReadMe`` file plus data file(s), or
combined in a single file. Different subsections within the description
are separated by lines of dashes or equal signs ("------" or "======").
The table which specifies the column information must be preceded by a line
starting with "Byte-by-byte Description of file:".
In the case where the table description is combined with the data values,
the data must be in the last section and must be preceded by a section
delimiter line (dashes or equal signs only).
**Basic usage**
Use the ``ascii.read()`` function as normal, with an optional ``readme``
parameter indicating the CDS ReadMe file. If not supplied it is assumed that
the header information is at the top of the given table. Examples::
>>> from astropy.io import ascii
>>> table = ascii.read("data/cds.dat")
>>> table = ascii.read("data/vizier/table1.dat", readme="data/vizier/ReadMe")
>>> table = ascii.read("data/cds/multi/lhs2065.dat", readme="data/cds/multi/ReadMe")
>>> table = ascii.read("data/cds/glob/lmxbrefs.dat", readme="data/cds/glob/ReadMe")
The table name and the CDS ReadMe file can be entered as URLs. This can be used
to directly load tables from the Internet. For example, Vizier tables from the
CDS::
>>> table = ascii.read("ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/snrs.dat",
... readme="ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/ReadMe")
If the header (ReadMe) and data are stored in a single file and there
is content between the header and the data (for instance Notes), then the
parsing process may fail. In this case you can instruct the reader to
guess the actual start of the data by supplying ``data_start='guess'`` in the
call to the ``ascii.read()`` function. You should verify that the output
data table matches expectation based on the input CDS file.
**Using a reader object**
    When a ``Cds`` reader object is created with a ``readme`` parameter
passed to it at initialization, then when the ``read`` method is
executed with a table filename, the header information for the
specified table is taken from the ``readme`` file. An
``InconsistentTableError`` is raised if the ``readme`` file does not
have header information for the given table.
>>> readme = "data/vizier/ReadMe"
>>> r = ascii.get_reader(ascii.Cds, readme=readme)
>>> table = r.read("data/vizier/table1.dat")
>>> # table5.dat has the same ReadMe file
>>> table = r.read("data/vizier/table5.dat")
If no ``readme`` parameter is specified, then the header
information is assumed to be at the top of the given table.
>>> r = ascii.get_reader(ascii.Cds)
>>> table = r.read("data/cds.dat")
>>> #The following gives InconsistentTableError, since no
>>> #readme file was given and table1.dat does not have a header.
>>> table = r.read("data/vizier/table1.dat")
Traceback (most recent call last):
...
InconsistentTableError: No CDS section delimiter found
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = "cds"
_io_registry_format_aliases = ["cds"]
_io_registry_can_write = False
_description = "CDS format table"
data_class = CdsData
header_class = CdsHeader
def __init__(self, readme=None):
super().__init__()
self.header.readme = readme
def write(self, table=None):
"""Not available for the CDS class (raises NotImplementedError)"""
raise NotImplementedError
def read(self, table):
# If the read kwarg `data_start` is 'guess' then the table may have extraneous
# lines between the end of the header and the beginning of data.
if self.data.start_line == "guess":
# Replicate the first part of BaseReader.read up to the point where
# the table lines are initially read in.
with suppress(TypeError):
# For strings only
if os.linesep not in table + "":
self.data.table_name = os.path.basename(table)
self.data.header = self.header
self.header.data = self.data
# Get a list of the lines (rows) in the table
lines = self.inputter.get_lines(table)
# Now try increasing data.start_line by one until the table reads successfully.
# For efficiency use the in-memory list of lines instead of `table`, which
# could be a file.
for data_start in range(len(lines)):
self.data.start_line = data_start
with suppress(Exception):
table = super().read(lines)
return table
else:
return super().read(table)
|
66fead0239f39c4205ecc445ce6436d45d024b418b35d7dd5e5828e37ded555f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
basic.py:
Basic table read / write functionality for simple character
delimited files with various options for column header definition.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
class BasicHeader(core.BaseHeader):
"""
Basic table Header Reader
Set a few defaults for common ascii table formats
(start at line 0, comments begin with ``#`` and possibly white space)
"""
start_line = 0
comment = r"\s*#"
write_comment = "# "
class BasicData(core.BaseData):
"""
Basic table Data Reader
Set a few defaults for common ascii table formats
(start at line 1, comments begin with ``#`` and possibly white space)
"""
start_line = 1
comment = r"\s*#"
write_comment = "# "
class Basic(core.BaseReader):
r"""Character-delimited table with a single header line at the top.
Lines beginning with a comment character (default='#') as the first
non-whitespace character are comments.
Example table::
# Column definition is the first uncommented line
# Default delimiter is the space character.
apples oranges pears
# Data starts after the header column definition, blank lines ignored
1 2 3
4 5 6
"""
_format_name = "basic"
_description = "Basic table with custom delimiters"
_io_registry_format_aliases = ["ascii"]
header_class = BasicHeader
data_class = BasicData
class NoHeaderHeader(BasicHeader):
"""
    Header reader for tables that have no header line
Set the start of header line number to `None`, which tells the basic
reader there is no header line.
"""
start_line = None
class NoHeaderData(BasicData):
"""
Reader for table data without a header
Data starts at first uncommented line since there is no header line.
"""
start_line = 0
class NoHeader(Basic):
"""Character-delimited table with no header line.
When reading, columns are autonamed using header.auto_format which defaults
to "col%d". Otherwise this reader the same as the :class:`Basic` class
from which it is derived. Example::
# Table data
1 2 "hello there"
3 4 world
"""
_format_name = "no_header"
_description = "Basic table with no headers"
header_class = NoHeaderHeader
data_class = NoHeaderData
class CommentedHeaderHeader(BasicHeader):
"""
Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""
Return only lines that start with the comment regexp. For these
lines strip out the matching characters.
"""
re_comment = re.compile(self.comment)
for line in lines:
match = re_comment.match(line)
if match:
yield line[match.end() :]
def write(self, lines):
lines.append(self.write_comment + self.splitter.join(self.colnames))
class CommentedHeader(Basic):
"""Character-delimited table with column names in a comment line.
When reading, ``header_start`` can be used to specify the
line index of column names, and it can be a negative index (for example -1
for the last commented line). The default delimiter is the <space>
character.
This matches the format produced by ``np.savetxt()``, with ``delimiter=','``,
and ``header='<comma-delimited-column-names-list>'``.
Example::
# col1 col2 col3
# Comment line
1 2 3
4 5 6
"""
_format_name = "commented_header"
_description = "Column names in a commented line"
header_class = CommentedHeaderHeader
data_class = NoHeaderData
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super().read(table)
# Strip off the comment line set as the header line for
# commented_header format (first by default).
if "comments" in out.meta:
idx = self.header.start_line
if idx < 0:
idx = len(out.meta["comments"]) + idx
out.meta["comments"] = (
out.meta["comments"][:idx] + out.meta["comments"][idx + 1 :]
)
if not out.meta["comments"]:
del out.meta["comments"]
return out
def write_header(self, lines, meta):
"""
Write comment lines after, rather than before, the header.
"""
self.header.write(lines)
self.header.write_comments(lines, meta)
class TabHeaderSplitter(core.DefaultSplitter):
"""Split lines on tab and do not remove whitespace"""
delimiter = "\t"
def process_line(self, line):
return line + "\n"
class TabDataSplitter(TabHeaderSplitter):
"""
Don't strip data value whitespace since that is significant in TSV tables
"""
process_val = None
skipinitialspace = False
class TabHeader(BasicHeader):
"""
Reader for header of tables with tab separated header
"""
splitter_class = TabHeaderSplitter
class TabData(BasicData):
"""
Reader for data of tables with tab separated data
"""
splitter_class = TabDataSplitter
class Tab(Basic):
"""Tab-separated table.
Unlike the :class:`Basic` reader, whitespace is not stripped from the
beginning and end of either lines or individual column values.
Example::
col1 <tab> col2 <tab> col3
# Comment line
1 <tab> 2 <tab> 5
"""
_format_name = "tab"
_description = "Basic table with tab-separated values"
header_class = TabHeader
data_class = TabData
class CsvSplitter(core.DefaultSplitter):
"""
Split on comma for CSV (comma-separated-value) tables
"""
delimiter = ","
class CsvHeader(BasicHeader):
"""
Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter`
"""
splitter_class = CsvSplitter
comment = None
write_comment = None
class CsvData(BasicData):
"""
Data that uses the :class:`astropy.io.ascii.basic.CsvSplitter`
"""
splitter_class = CsvSplitter
fill_values = [(core.masked, "")]
comment = None
write_comment = None
class Csv(Basic):
"""CSV (comma-separated-values) table.
This file format may contain rows with fewer entries than the number of
columns, a situation that occurs in output from some spreadsheet editors.
The missing entries are marked as masked in the output table.
Masked values (indicated by an empty '' field value when reading) are
written out in the same way with an empty ('') field. This is different
from the typical default for `astropy.io.ascii` in which missing values are
indicated by ``--``.
Since the `CSV format <https://tools.ietf.org/html/rfc4180>`_ does not
formally support comments, any comments defined for the table via
``tbl.meta['comments']`` are ignored by default. If you would still like to
    write those comments then pass the keyword ``comment='#'`` to the
``write()`` call.
Example::
num,ra,dec,radius,mag
1,32.23222,10.1211
2,38.12321,-88.1321,2.2,17.0
"""
_format_name = "csv"
_io_registry_format_aliases = ["csv"]
_io_registry_can_write = True
_io_registry_suffix = ".csv"
_description = "Comma-separated-values"
header_class = CsvHeader
data_class = CsvData
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust row if it is too short.
If a data row is shorter than the header, add empty values to make it the
right length.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table.
"""
if len(str_vals) < ncols:
str_vals.extend((ncols - len(str_vals)) * [""])
return str_vals
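    # Illustrative behavior of ``inconsistent_handler`` (a minimal sketch;
    # the row values are hypothetical):
    #
    #     >>> from astropy.io import ascii
    #     >>> reader = ascii.get_reader(ascii.Csv)
    #     >>> reader.inconsistent_handler(['1', '2'], 4)
    #     ['1', '2', '', '']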
class RdbHeader(TabHeader):
"""
Header for RDB tables
"""
col_type_map = {"n": core.NumType, "s": core.StrType}
def get_type_map_key(self, col):
return col.raw_type[-1]
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
This is a specialized get_cols for the RDB type:
Line 0: RDB col names
Line 1: RDB col definitions
Line 2+: RDB data rows
Parameters
----------
lines : list
List of table lines
Returns
-------
None
"""
header_lines = self.process_lines(lines) # this is a generator
header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))]
if len(header_vals_list) != 2:
raise ValueError("RDB header requires 2 lines")
self.names, raw_types = header_vals_list
if len(self.names) != len(raw_types):
raise core.InconsistentTableError(
"RDB header mismatch between number of column names and column types."
)
if any(not re.match(r"\d*(N|S)$", x, re.IGNORECASE) for x in raw_types):
raise core.InconsistentTableError(
f"RDB types definitions do not all match [num](N|S): {raw_types}"
)
self._set_cols_from_names()
for col, raw_type in zip(self.cols, raw_types):
col.raw_type = raw_type
col.type = self.get_col_type(col)
def write(self, lines):
lines.append(self.splitter.join(self.colnames))
rdb_types = []
for col in self.cols:
# Check if dtype.kind is string or unicode. See help(np.core.numerictypes)
rdb_type = "S" if col.info.dtype.kind in ("S", "U") else "N"
rdb_types.append(rdb_type)
lines.append(self.splitter.join(rdb_types))
class RdbData(TabData):
"""
Data reader for RDB data. Starts reading at line 2.
"""
start_line = 2
class Rdb(Tab):
"""Tab-separated file with an extra line after the column definition line that
specifies either numeric (N) or string (S) data.
See: https://www.drdobbs.com/rdb-a-unix-command-line-database/199101326
Example::
col1 <tab> col2 <tab> col3
N <tab> S <tab> N
1 <tab> 2 <tab> 5
"""
_format_name = "rdb"
_io_registry_format_aliases = ["rdb"]
_io_registry_suffix = ".rdb"
_description = "Tab-separated with a type definition header line"
header_class = RdbHeader
data_class = RdbData
|
fbfe2816a4f49d218bda91bdff4ff6ea44ed245698972fa09af533f843d0f2e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects the readers/writers to the astropy.table.Table class
import re
from astropy.io import registry as io_registry # noqa: F401
from astropy.table import Table
__all__ = []
def io_read(format, filename, **kwargs):
from .ui import read
if format != "ascii":
format = re.sub(r"^ascii\.", "", format)
kwargs["format"] = format
return read(filename, **kwargs)
def io_write(format, table, filename, **kwargs):
from .ui import write
if format != "ascii":
format = re.sub(r"^ascii\.", "", format)
kwargs["format"] = format
return write(table, filename, **kwargs)
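# Illustrative mapping (format names are hypothetical): a registry format of
# "ascii.csv" is forwarded to ``read``/``write`` as ``format="csv"``, while a
# plain "ascii" format sets no explicit ``format`` keyword, so the reader
# falls back to format guessing.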
def io_identify(suffix, origin, filepath, fileobj, *args, **kwargs):
return filepath is not None and filepath.endswith(suffix)
def _get_connectors_table():
from .core import FORMAT_CLASSES
rows = []
rows.append(
("ascii", "", "Yes", "ASCII table in any supported format (uses guessing)")
)
for format in sorted(FORMAT_CLASSES):
cls = FORMAT_CLASSES[format]
io_format = "ascii." + cls._format_name
description = getattr(cls, "_description", "")
class_link = f":class:`~{cls.__module__}.{cls.__name__}`"
suffix = getattr(cls, "_io_registry_suffix", "")
can_write = "Yes" if getattr(cls, "_io_registry_can_write", True) else ""
rows.append((io_format, suffix, can_write, f"{class_link}: {description}"))
out = Table(list(zip(*rows)), names=("Format", "Suffix", "Write", "Description"))
for colname in ("Format", "Description"):
width = max(len(x) for x in out[colname])
out[colname].format = f"%-{width}s"
return out
|
4f91f825d8d1c45faec232998a71dcde13e53a9d8598444df028310dd380558f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import copy
import csv
import fnmatch
import functools
import inspect
import itertools
import operator
import os
import re
import warnings
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
def _check_multidim_table(table, max_ndim):
"""Check that ``table`` has only columns with ndim <= ``max_ndim``
Currently ECSV is the only built-in format that supports output of arbitrary
N-d columns, but HTML supports 2-d.
"""
# No limit?
if max_ndim is None:
return
# Check for N-d columns
nd_names = [col.info.name for col in table.itercols() if len(col.shape) > max_ndim]
if nd_names:
raise ValueError(
f"column(s) with dimension > {max_ndim} "
"cannot be be written with this format, try using 'ecsv' "
"(Enhanced CSV) format"
)
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = "2b=48Av%0-V3p>bX"
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (
dialect.delimiter == " "
)
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == "":
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == "":
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
        # If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
"""All instances of this class shall have the same hash."""
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
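# Illustrative use of the hashable ``masked`` singleton defined above (the
# dictionary below is hypothetical):
#
#     >>> fill_map = {masked: ""}   # plain numpy.ma.masked is not hashable
#     >>> masked in fill_map
#     True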
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **fill_values** : dict of fill values
* **shape** : list of element shape (default [] => scalar)
* **data** : list of converted column values
* **subtype** : actual datatype for columns serialized with JSON
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
self.shape = []
self.subtype = None
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table, newline=None):
"""
Get the lines from the ``table`` input. The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file-like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a
``read()`` method, or a list of strings.
newline : str or None, optional
Line separator. If `None` use OS default from ``splitlines()``.
Returns
-------
lines : list
List of lines
"""
try:
if hasattr(table, "read") or (
"\n" not in table + "" and "\r" not in table + ""
):
with get_readable_fileobj(table, encoding=self.encoding) as fileobj:
table = fileobj.read()
if newline is None:
lines = table.splitlines()
else:
lines = table.split(newline)
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
if len(table) > 1:
lines = table
else:
# treat single entry as if string had been passed directly
if newline is None:
lines = table[0].splitlines()
else:
lines = table[0].split(newline)
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable'
)
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines."""
return lines
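# Illustrative example (not part of the original module): ``get_lines``
# accepts, among other inputs, a newline-separated string or a list of
# strings::
#
#     inputter = BaseInputter()
#     inputter.get_lines("a b\n1 2")      # -> ['a b', '1 2']
#     inputter.get_lines(["a b", "1 2"])  # -> ['a b', '1 2']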
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
"""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = " "
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
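# Illustrative example (not part of the original module): with the default
# ``delimiter = None`` the splitter splits on runs of whitespace and strips
# each value::
#
#     splitter = BaseSplitter()
#     list(splitter(["  1   2 ", "3 4"]))  # -> [['1', '2'], ['3', '4']]
#     splitter.join(["1", "2"])            # -> '1 2'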
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = " "
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first"""
if self.delimiter == r"\s":
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip() + "\n"
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip(" \t")
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Yields
------
line : list of str
Each line's split values.
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = " " if self.delimiter == r"\s" else self.delimiter
csv_reader = csv.reader(
lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace,
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = " " if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
)
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals).rstrip("\r\n")
return out
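# Illustrative example (not part of the original module): unlike BaseSplitter,
# the csv-based DefaultSplitter honors quoted fields::
#
#     splitter = DefaultSplitter()
#     splitter.delimiter = ","
#     list(splitter(['a,"b,c",d']))  # -> [['a', 'b,c', 'd']]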
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a quoted substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = "NONE"
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if hasattr(line_or_func, "__call__"):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
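# Illustrative example (not part of the original module)::
#
#     _get_line_index(2, ["a", "b", "c", "d"])   # -> 2
#     _get_line_index(-1, ["a", "b", "c", "d"])  # -> 3 (counted from the end)
#     _get_line_index(lambda lines: 1, ["a"])    # -> 1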
class BaseHeader:
"""
Base table header reader
"""
auto_format = "col{}"
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [
re.sub("^" + self.comment, "", x).strip() for x in comment_lines
]
if comment_lines:
meta.setdefault("table", {})["comments"] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError(
"No data lines found so cannot autogenerate column names"
)
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i) for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError("No header line found in table")
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines"""
re_comment = re.compile(self.comment) if self.comment else None
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment not in (False, None):
for comment in meta.get("comments", []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(
range(self.start_line), itertools.cycle(self.write_spacer_lines)
):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table"""
return tuple(
col.name if isinstance(col, Column) else col.info.name for col in self.cols
)
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name, new_name):
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f"got column type {type(col)} instead of required {Column}")
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError(
'Unknown data type "{}" for column "{}"'.format(
col.raw_type, col.name
)
)
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (
_is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads
):
raise InconsistentTableError(
f"Column name {name!r} does not meet strict name requirements"
)
# When guessing require at least two columns, except for ECSV which can
# reliably be guessed from the header requirements.
if (
guessing
and len(self.colnames) <= 1
and self.__class__.__name__ != "EcsvHeader"
):
raise ValueError(
"Table format guessing requires at least two columns, got {}".format(
list(self.colnames)
)
)
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
"Length of names argument ({}) does not match number"
" of table columns ({})".format(len(names), len(self.colnames))
)
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, "")]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
READ: Strip out comment lines and blank lines from list of ``lines``
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""
READ: Set ``data_lines`` attribute to lines slice comprising table data values.
"""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line."""
return self.splitter(self.data_lines)
def masks(self, cols):
"""READ: Set fill value for each column and then apply that fill value
In the first step it is evaluated which value from ``fill_values`` applies to
which column, using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""READ, WRITE: Set fill values of individual cols based on fill_values of BaseData
``fill_values`` has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, "fill_values"):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ""
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError(
"Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)"
)
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in (
(i, x)
for i, x in enumerate(self.header.colnames)
if x in affect_cols
):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""READ: Replace string values in col.str_vals and set masks"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=bool)
for i, str_val in (
(i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values
):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""WRITE: replace string values in col.str_vals"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in (
(i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values
):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, "mask"):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings
This sets the fill values and possibly column formats from the input
formats={} keyword, then ends up calling table.pprint._pformat_col_iter()
by a circuitous path. That function does the real work of formatting.
Finally replace anything matching the fill_values.
Returns
-------
values : list of list of str
"""
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
"""Write ``self.cols`` in place to ``lines``.
Parameters
----------
lines : list
List for collecting output of writing self.cols.
"""
if hasattr(self.start_line, "__call__"):
raise TypeError("Start_line attribute cannot be callable for write()")
else:
data_start_line = self.start_line or 0
spacer_lines = itertools.cycle(self.write_spacer_lines)
while len(lines) < data_start_line:
# Pad the output with spacer strings until the data start line.
lines.append(next(spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""WRITE: set column formats."""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_
(e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python
type covered by a numpy type (e.g., int, float, str, bool).
Returns
-------
converter : callable
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
converter_type : type
``converter_type`` tracks the generic data type produced by the
converter function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if "int" in type_name:
converter_type = IntType
elif "float" in type_name:
converter_type = FloatType
elif "bool" in type_name:
converter_type = BoolType
elif "str" in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all(
(svals == "False") | (svals == "True") | (svals == "0") | (svals == "1")
):
raise ValueError('bool input strings must be False, True, 0, 1, or ""')
vals = numpy.asarray(vals)
trues = (vals == "True") | (vals == "1")
falses = (vals == "False") | (vals == "0")
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False, True, 0, 1, or ""')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
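# Illustrative example (not part of the original module)::
#
#     converter, ctype = convert_numpy(int)
#     converter(["1", "2"])  # -> array([1, 2])
#     assert ctype is IntType
#
#     bool_conv, _ = convert_numpy(bool)
#     bool_conv(["True", "0"])  # -> array([ True, False])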
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
# User-defined converters which get set in ascii.ui if a ``converters`` kwarg
# is supplied.
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type)"""
# Allow specifying a single converter instead of a list of converters.
# The input `converters` must be a ``type`` value that can init np.dtype.
try:
# Don't allow list-like things that dtype accepts
assert type(converters) is type
converters = [numpy.dtype(converters)]
except (AssertionError, TypeError):
pass
converters_out = []
try:
for converter in converters:
try:
converter_func, converter_type = converter
except TypeError as err:
if str(err).startswith("cannot unpack"):
converter_func, converter_type = convert_numpy(converter)
else:
raise
if not issubclass(converter_type, NoType):
raise ValueError("converter_type must be a subclass of NoType")
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError) as err:
raise ValueError(
"Error: invalid format for converters, see "
f"documentation\n{converters}: {err}"
)
return converters_out
def _convert_vals(self, cols):
for col in cols:
for key, converters in self.converters.items():
if fnmatch.fnmatch(col.name, key):
break
else:
if col.dtype is not None:
converters = [convert_numpy(col.dtype)]
else:
converters = self.default_converters
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
# value of ``last_err`` will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = "no converters defined"
while not hasattr(col, "data"):
# Try converters, popping the unsuccessful ones from the list.
# If there are no converters left here then fail.
if not col.converters:
raise ValueError(f"Column {col.name} failed to convert: {last_err}")
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError("converter type does not match column type")
try:
col.data = converter_func(col.str_vals)
col.type = converter_type
except (OverflowError, TypeError, ValueError) as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
# With python/cpython#95778 this has been supplemented with a
# "ValueError: Exceeds the limit (4300) for integer string conversion"
# so need to catch that as well.
if isinstance(err, OverflowError) or (
isinstance(err, ValueError)
and str(err).startswith("Exceeds the limit")
):
warnings.warn(
f"OverflowError converting to {converter_type.__name__} in"
f" column {col.name}, reverting to String.",
AstropyWarning,
)
col.converters.insert(0, convert_numpy(str))
else:
col.converters.pop(0)
last_err = err
def _deduplicate_names(names):
"""Ensure there are no duplicates in ``names``
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + "_"
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
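# Illustrative example (not part of the original module)::
#
#     _deduplicate_names(["a", "b", "a", "a"])  # -> ['a', 'b', 'a_1', 'a_2']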
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(int), convert_numpy(float), convert_numpy(str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [
numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, "mask") and numpy.any(x.mask)
else x.data
for x in cols
]
out = Table(t_cols, names=[x.name for x in cols], meta=meta["table"])
for col, out_col in zip(cols, out.columns.values()):
for attr in ("format", "unit", "description"):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, "meta"):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get("_format_name")
if format is None:
return
fast = dct.get("_fast")
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ["ascii." + format] + dct.get("_io_registry_format_aliases", [])
if dct.get("_io_registry_suffix"):
func = functools.partial(connect.io_identify, dct["_io_registry_suffix"])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (
inspect.cleandoc(READ_DOCSTRING).strip()
+ "\n\n"
+ header
+ re.sub(".", "=", header)
+ "\n"
)
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get("_io_registry_can_write", True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (
inspect.cleandoc(WRITE_DOCSTRING).strip()
+ "\n\n"
+ header
+ re.sub(".", "=", header)
+ "\n"
)
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
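# Illustrative example (not part of the original module)::
#
#     _is_number("1e3")  # -> True
#     _is_number("abc")  # -> False (the ValueError from float() is suppressed)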
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table or BaseHeader.
For the latter this relies on BaseHeader implementing ``colnames``,
``rename_column``, and ``remove_columns``.
Parameters
----------
table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader`
Input table or BaseHeader subclass instance
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
"""
def rename_columns(table, names):
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = "x" * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
if names is not None:
rename_columns(table, names)
else:
colnames_uniq = _deduplicate_names(table.colnames)
if colnames_uniq != list(table.colnames):
rename_columns(table, colnames_uniq)
names_set = set(table.colnames)
if include_names is not None:
names_set.intersection_update(include_names)
if exclude_names is not None:
names_set.difference_update(exclude_names)
if names_set != set(table.colnames):
remove_names = set(table.colnames) - names_set
table.remove_columns(remove_names)
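# Illustrative example (not part of the original module): ``exclude_names``
# is applied after ``include_names``::
#
#     from astropy.table import Table
#     t = Table({"a": [1], "b": [2], "c": [3]})
#     _apply_include_exclude_names(t, None, ["a", "b"], ["b"])
#     t.colnames  # -> ['a']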
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
# Max column dimension that writer supports for this format. Exceptions
# include ECSV (no limit) and HTML (max_ndim=2).
max_ndim = 1
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
# need to know about header (e.g. for fixed-width tables where widths are spec'd in the header).
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(), cols=OrderedDict())
def _check_multidim_table(self, table):
"""Check that the dimensions of columns in ``table`` are acceptable.
The reader class attribute ``max_ndim`` defines the maximum dimension of
columns that can be written using this format. The base value is ``1``,
corresponding to normal scalar columns with just a length.
Parameters
----------
table : `~astropy.table.Table`
Input table.
Raises
------
ValueError
If any column exceeds the number of allowed dimensions
"""
_check_multidim_table(table, self.max_ndim)
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file-like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is a "file" if it is a string
# without the OS-specific line separator.
with suppress(TypeError):
# Strings only
if os.linesep not in table + "":
self.data.table_name = os.path.basename(table)
# If one of the newline chars is set as field delimiter, only
# accept the other one as line splitter
if self.header.splitter.delimiter == "\n":
newline = "\r"
elif self.header.splitter.delimiter == "\r":
newline = "\n"
else:
newline = None
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table, newline=newline)
# Set self.data.data_lines to a slice of lines containing the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = (
"Number of header columns ({}) inconsistent with"
" data columns ({}) at data line {}\n"
"Header values: {}\n"
"Data values: {}".format(
n_cols, len(str_vals), i, [x.name for x in cols], str_vals
)
)
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, "table_meta"):
self.meta["table"].update(self.header.table_meta)
_apply_include_exclude_names(
self.header, self.names, self.include_names, self.exclude_names
)
table = self.outputter(self.header.cols, self.meta)
self.cols = self.header.cols
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp"""
if not hasattr(self, "lines"):
raise ValueError(
"Table must be read prior to accessing the header comment lines"
)
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(
table, self.names, self.include_names, self.exclude_names
)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Check that table column dimensions are supported by this format class.
# Most formats support only 1-d columns, but some like ECSV support N-d.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined
with the subsequent line. Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = "\\"
replace_char = " "
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append("".join(parts))
parts = []
return outlines
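# Illustrative example (not part of the original module): lines ending in the
# continuation character are merged with the following line::
#
#     inputter = ContinuationLinesInputter()
#     inputter.process_lines(["1 2\\", "3", "4 5 6"])  # -> ['1 2 3', '4 5 6']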
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings"""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (
self.escapechar is None or lastchar != self.escapechar
):
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
extra_reader_pars = (
"Reader",
"Inputter",
"Outputter",
"delimiter",
"comment",
"quotechar",
"header_start",
"data_start",
"data_end",
"converters",
"encoding",
"data_Splitter",
"header_Splitter",
"names",
"include_names",
"exclude_names",
"strict_names",
"fill_values",
"fill_include_names",
"fill_exclude_names",
)
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs["Inputter"] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if "fast_reader" in kwargs:
if kwargs["fast_reader"]["enable"] == "force":
raise ParameterError(
"fast_reader required with "
"{}, but this is not a fast C reader: {}".format(
kwargs["fast_reader"], Reader
)
)
else:
del kwargs["fast_reader"] # Otherwise ignore fast_reader parameter
reader_kwargs = {k: v for k, v in kwargs.items() if k not in extra_reader_pars}
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested to set data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
# csv.reader is hard-coded to recognize either '\r' or '\n' as end-of-line,
# therefore DefaultSplitter cannot handle these as delimiters.
if "delimiter" in kwargs:
if kwargs["delimiter"] in ("\n", "\r", "\r\n"):
reader.header.splitter = BaseSplitter()
reader.data.splitter = BaseSplitter()
reader.header.splitter.delimiter = kwargs["delimiter"]
reader.data.splitter.delimiter = kwargs["delimiter"]
if "comment" in kwargs:
reader.header.comment = kwargs["comment"]
reader.data.comment = kwargs["comment"]
if "quotechar" in kwargs:
reader.header.splitter.quotechar = kwargs["quotechar"]
reader.data.splitter.quotechar = kwargs["quotechar"]
if "data_start" in kwargs:
reader.data.start_line = kwargs["data_start"]
if "data_end" in kwargs:
reader.data.end_line = kwargs["data_end"]
if "header_start" in kwargs:
if reader.header.start_line is not None:
reader.header.start_line = kwargs["header_start"]
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as absolute number and not relative to header_start.
# So, ignore this Reader here.
if (
("data_start" not in kwargs)
and (default_header_length is not None)
and reader._format_name
not in ["fixed_width_two_line", "commented_header"]
):
reader.data.start_line = (
reader.header.start_line + default_header_length
)
elif kwargs["header_start"] is not None:
# User trying to set a None header start to some value other than None
raise ValueError("header_start cannot be modified for this Reader")
if "converters" in kwargs:
reader.outputter.converters = kwargs["converters"]
if "data_Splitter" in kwargs:
reader.data.splitter = kwargs["data_Splitter"]()
if "header_Splitter" in kwargs:
reader.header.splitter = kwargs["header_Splitter"]()
if "names" in kwargs:
reader.names = kwargs["names"]
if None in reader.names:
raise TypeError("Cannot have None for column name")
if len(set(reader.names)) != len(reader.names):
raise ValueError("Duplicate column names")
if "include_names" in kwargs:
reader.include_names = kwargs["include_names"]
if "exclude_names" in kwargs:
reader.exclude_names = kwargs["exclude_names"]
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if "strict_names" in kwargs:
reader.strict_names = kwargs["strict_names"]
if "fill_values" in kwargs:
reader.data.fill_values = kwargs["fill_values"]
if "fill_include_names" in kwargs:
reader.data.fill_include_names = kwargs["fill_include_names"]
if "fill_exclude_names" in kwargs:
reader.data.fill_exclude_names = kwargs["fill_exclude_names"]
if "encoding" in kwargs:
reader.encoding = kwargs["encoding"]
reader.inputter.encoding = kwargs["encoding"]
return reader
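# Illustrative example (not part of the original module): the public
# ``astropy.io.ascii.get_reader`` entry point routes through this helper::
#
#     from astropy.io import ascii
#     reader = ascii.get_reader(Reader=ascii.Basic, delimiter=",")
#     table = reader.read("a,b\n1,2")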
extra_writer_pars = (
"delimiter",
"comment",
"quotechar",
"formats",
"strip_whitespace",
"names",
"include_names",
"exclude_names",
"fill_values",
"fill_include_names",
"fill_exclude_names",
)
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module."""
from .fastbasic import FastBasic
# A value of None for fill_values implies getting the default string
# representation of masked values (depending on the writer class), but the
# machinery expects a list. The easiest here is to just pop the value off,
# i.e. fill_values=None is the same as not providing it at all.
if "fill_values" in kwargs and kwargs["fill_values"] is None:
del kwargs["fill_values"]
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f"fast_{Writer._format_name}" in FAST_CLASSES:
# Switch to fast writer
kwargs["fast_writer"] = fast_writer
return FAST_CLASSES[f"fast_{Writer._format_name}"](**kwargs)
writer_kwargs = {k: v for k, v in kwargs.items() if k not in extra_writer_pars}
writer = Writer(**writer_kwargs)
if "delimiter" in kwargs:
writer.header.splitter.delimiter = kwargs["delimiter"]
writer.data.splitter.delimiter = kwargs["delimiter"]
if "comment" in kwargs:
writer.header.write_comment = kwargs["comment"]
writer.data.write_comment = kwargs["comment"]
if "quotechar" in kwargs:
writer.header.splitter.quotechar = kwargs["quotechar"]
writer.data.splitter.quotechar = kwargs["quotechar"]
if "formats" in kwargs:
writer.data.formats = kwargs["formats"]
if "strip_whitespace" in kwargs:
if kwargs["strip_whitespace"]:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller("strip", " \t")
else:
writer.data.splitter.process_val = None
if "names" in kwargs:
writer.header.names = kwargs["names"]
if "include_names" in kwargs:
writer.include_names = kwargs["include_names"]
if "exclude_names" in kwargs:
writer.exclude_names = kwargs["exclude_names"]
if "fill_values" in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs["fill_values"][1] + ""
kwargs["fill_values"] = [kwargs["fill_values"]]
writer.data.fill_values = kwargs["fill_values"] + writer.data.fill_values
if "fill_include_names" in kwargs:
writer.data.fill_include_names = kwargs["fill_include_names"]
if "fill_exclude_names" in kwargs:
writer.data.fill_exclude_names = kwargs["fill_exclude_names"]
return writer
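# Illustrative example (not part of the original module): the public
# ``astropy.io.ascii.get_writer`` entry point routes through this helper::
#
#     from astropy.io import ascii
#     writer = ascii.get_writer(Writer=ascii.Basic, delimiter="|")
#     lines = writer.write(table)  # ``table`` is an astropy Table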
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
"""
# flake8: noqa
from . import connect
from .basic import (
Basic,
BasicData,
BasicHeader,
CommentedHeader,
Csv,
NoHeader,
Rdb,
Tab,
)
from .cds import Cds
from .core import (
AllType,
BaseData,
BaseHeader,
BaseInputter,
BaseOutputter,
BaseReader,
BaseSplitter,
Column,
ContinuationLinesInputter,
DefaultSplitter,
FloatType,
InconsistentTableError,
IntType,
NoType,
NumType,
ParameterError,
StrType,
TableOutputter,
WhitespaceSplitter,
convert_numpy,
masked,
)
from .daophot import Daophot
from .ecsv import Ecsv
from .fastbasic import (
FastBasic,
FastCommentedHeader,
FastCsv,
FastNoHeader,
FastRdb,
FastTab,
)
from .fixedwidth import (
FixedWidth,
FixedWidthData,
FixedWidthHeader,
FixedWidthNoHeader,
FixedWidthSplitter,
FixedWidthTwoLine,
)
from .html import HTML
from .ipac import Ipac
from .latex import AASTex, Latex, latexdicts
from .mrt import Mrt
from .qdp import QDP
from .rst import RST
from .sextractor import SExtractor
from .ui import get_read_trace, get_reader, get_writer, read, set_guess, write
# Licensed under a 3-clause BSD style license
"""
:Author: Simon Gibbons ([email protected])
"""
from .core import DefaultSplitter
from .fixedwidth import (
FixedWidth,
FixedWidthData,
FixedWidthHeader,
FixedWidthTwoLineDataSplitter,
)
class SimpleRSTHeader(FixedWidthHeader):
position_line = 0
start_line = 1
splitter_class = DefaultSplitter
position_char = "="
def get_fixedwidth_params(self, line):
vals, starts, ends = super().get_fixedwidth_params(line)
# The right hand column can be unbounded
ends[-1] = None
return vals, starts, ends
class SimpleRSTData(FixedWidthData):
start_line = 3
end_line = -1
splitter_class = FixedWidthTwoLineDataSplitter
class RST(FixedWidth):
"""reStructuredText simple format table.
See: https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#simple-tables
Example::
==== ===== ======
Col1 Col2 Col3
==== ===== ======
1 2.3 Hello
2 4.5 Worlds
==== ===== ======
Currently there is no support for reading tables which utilize continuation lines,
or for ones which define column spans through the use of an additional
line of dashes in the header.
"""
_format_name = "rst"
_description = "reStructuredText simple table"
data_class = SimpleRSTData
header_class = SimpleRSTHeader
def __init__(self):
super().__init__(delimiter_pad=None, bookend=False)
def write(self, lines):
lines = super().write(lines)
lines = [lines[1]] + lines + [lines[1]]
return lines
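# Illustrative example (not part of the original module): the ``=`` position
# line is added above the column names and repeated after the data::
#
#     from astropy.io import ascii
#     from astropy.table import Table
#     t = Table({"Col1": [1, 2], "Col2": [2.3, 4.5]})
#     ascii.write(t, format="rst")
#     # ==== ====
#     # Col1 Col2
#     # ==== ====
#     #    1  2.3
#     #    2  4.5
#     # ==== ====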
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
fixedwidth.py:
Read or write a table with fixed width columns.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
from . import basic, core
from .core import DefaultSplitter, InconsistentTableError
class FixedWidthSplitter(core.BaseSplitter):
"""
Split line based on fixed start and end positions for each ``col`` in
``self.cols``.
This class requires that the Header class will have defined ``col.start``
and ``col.end`` for each column. The reference to the ``header.cols`` gets
put in the splitter object by the base Reader.read() function just in time
for splitting data lines by a ``data`` object.
Note that the ``start`` and ``end`` positions are defined in the pythonic
style so line[start:end] is the desired substring for a column. This splitter
class does not have a hook for ``process_lines`` since that is generally not
useful for fixed-width input.
"""
delimiter_pad = ""
bookend = False
delimiter = "|"
def __call__(self, lines):
for line in lines:
vals = [line[x.start : x.end] for x in self.cols]
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals, widths):
pad = self.delimiter_pad or ""
delimiter = self.delimiter or ""
padded_delim = pad + delimiter + pad
if self.bookend:
bookend_left = delimiter + pad
bookend_right = pad + delimiter
else:
bookend_left = ""
bookend_right = ""
vals = [" " * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
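# Illustrative example (not part of the original module): values are
# right-justified to the given widths before joining::
#
#     splitter = FixedWidthSplitter()
#     splitter.join(["1", "hello"], widths=[4, 6])  # -> '   1| hello'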
class FixedWidthHeaderSplitter(DefaultSplitter):
"""Splitter class that splits on ``|``."""
delimiter = "|"
class FixedWidthHeader(basic.BasicHeader):
"""
Fixed width table header reader.
"""
splitter_class = FixedWidthHeaderSplitter
""" Splitter class for splitting data lines into columns """
position_line = None # secondary header line position
""" row index of line that specifies position (default = 1) """
set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'")
def get_line(self, lines, index):
for i, line in enumerate(self.process_lines(lines)):
if i == index:
break
else: # No header line matching
raise InconsistentTableError("No header line found in table")
return line
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
header_rows = getattr(self, "header_rows", ["name"])
# See "else" clause below for explanation of start_line and position_line
start_line = core._get_line_index(self.start_line, self.process_lines(lines))
position_line = core._get_line_index(
self.position_line, self.process_lines(lines)
)
# If start_line is none then there is no header line. Column positions are
# determined from first data line and column names are either supplied by user
# or auto-generated.
if start_line is None:
if position_line is not None:
raise ValueError(
"Cannot set position_line without also setting header_start"
)
# data.data_lines attribute already set via self.data.get_data_lines(lines)
# in BaseReader.read(). This includes slicing for data_start / data_end.
data_lines = self.data.data_lines
if not data_lines:
raise InconsistentTableError(
"No data lines found so cannot autogenerate column names"
)
vals, starts, ends = self.get_fixedwidth_params(data_lines[0])
self.names = [self.auto_format.format(i) for i in range(1, len(vals) + 1)]
else:
# This bit of code handles two cases:
# start_line = <index> and position_line = None
# Single header line where that line is used to determine both the
# column positions and names.
# start_line = <index> and position_line = <index2>
# Two header lines where the first line defines the column names and
# the second line defines the column positions
if position_line is not None:
# Define self.col_starts and self.col_ends so that the call to
# get_fixedwidth_params below will use those to find the header
# column names. Note that get_fixedwidth_params returns Python
# slice col_ends but expects inclusive col_ends on input (for
# more intuitive user interface).
line = self.get_line(lines, position_line)
if len(set(line) - {self.splitter.delimiter, " "}) != 1:
raise InconsistentTableError(
"Position line should only contain delimiters and "
'one other character, e.g. "--- ------- ---".'
)
# The line above lies. It accepts white space as well.
# We don't want to encourage using three different
# characters, because that can cause ambiguities, but white
# spaces are so common everywhere that practicality beats
# purity here.
charset = self.set_of_position_line_characters.union(
{self.splitter.delimiter, " "}
)
if not set(line).issubset(charset):
raise InconsistentTableError(
f"Characters in position line must be part of {charset}"
)
vals, self.col_starts, col_ends = self.get_fixedwidth_params(line)
self.col_ends = [x - 1 if x is not None else None for x in col_ends]
# Get the column names from the header line
line = self.get_line(lines, start_line + header_rows.index("name"))
self.names, starts, ends = self.get_fixedwidth_params(line)
self._set_cols_from_names()
for ii, attr in enumerate(header_rows):
if attr != "name":
line = self.get_line(lines, start_line + ii)
vals = self.get_fixedwidth_params(line)[0]
for col, val in zip(self.cols, vals):
if val:
setattr(col, attr, val)
# Set column start and end positions.
for i, col in enumerate(self.cols):
col.start = starts[i]
col.end = ends[i]
def get_fixedwidth_params(self, line):
"""
Split ``line`` on the delimiter and determine column values and
column start and end positions. This might include null columns with
zero length (e.g. for ``header row = "| col1 || col2 | col3 |"`` or
``header2_row = "----- ------- -----"``). The null columns are
stripped out. Returns the values between delimiters and the
corresponding start and end positions.
Parameters
----------
line : str
Input line
Returns
-------
vals : list
List of values.
starts : list
List of starting indices.
ends : list
List of ending indices.
"""
# If column positions are already specified then just use those.
# If neither column starts or ends are given, figure out positions
# between delimiters. Otherwise, either the starts or the ends have
# been given, so figure out whichever wasn't given.
if self.col_starts is not None and self.col_ends is not None:
starts = list(self.col_starts) # could be any iterable, e.g. np.array
# user supplies inclusive endpoint
ends = [x + 1 if x is not None else None for x in self.col_ends]
if len(starts) != len(ends):
raise ValueError(
"Fixed width col_starts and col_ends must have the same length"
)
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
elif self.col_starts is None and self.col_ends is None:
# There might be a cleaner way to do this but it works...
vals = line.split(self.splitter.delimiter)
starts = [0]
ends = []
for val in vals:
if val:
ends.append(starts[-1] + len(val))
starts.append(ends[-1] + 1)
else:
starts[-1] += 1
starts = starts[:-1]
vals = [x.strip() for x in vals if x]
if len(vals) != len(starts) or len(vals) != len(ends):
raise InconsistentTableError("Error parsing fixed width header")
else:
# exactly one of col_starts or col_ends is given...
if self.col_starts is not None:
starts = list(self.col_starts)
ends = starts[1:] + [None] # Assume each col ends where the next starts
else: # self.col_ends is not None
ends = [x + 1 for x in self.col_ends]
starts = [0] + ends[:-1] # Assume each col starts where the last ended
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
return vals, starts, ends
def write(self, lines):
# Header line not written until data are formatted. Until then it is
# not known how wide each column will be for fixed width.
pass
class FixedWidthData(basic.BasicData):
"""
Base table data reader.
"""
splitter_class = FixedWidthSplitter
""" Splitter class for splitting data lines into columns """
start_line = None
def write(self, lines):
default_header_rows = [] if self.header.start_line is None else ["name"]
header_rows = getattr(self, "header_rows", default_header_rows)
# First part is getting the widths of each column.
# List (rows) of list (column values) for data lines
vals_list = []
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
# List (rows) of list (columns values) for header lines.
hdrs_list = []
for col_attr in header_rows:
vals = [
"" if (val := getattr(col.info, col_attr)) is None else str(val)
for col in self.cols
]
hdrs_list.append(vals)
# Widths for data columns
widths = [
max(len(vals[i_col]) for vals in vals_list)
for i_col in range(len(self.cols))
]
# Incorporate widths for header columns (if there are any)
if hdrs_list:
for i_col in range(len(self.cols)):
widths[i_col] = max(
widths[i_col], max(len(vals[i_col]) for vals in hdrs_list)
)
# Now collect formatted header and data lines into the output lines
for vals in hdrs_list:
lines.append(self.splitter.join(vals, widths))
if self.header.position_line is not None:
vals = [self.header.position_char * width for width in widths]
lines.append(self.splitter.join(vals, widths))
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class FixedWidth(basic.Basic):
"""Fixed width table with single header line defining column names and positions.
Examples::
# Bar delimiter in header and data
| Col1 | Col2 | Col3 |
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Bar delimiter in header only
Col1 | Col2 | Col3
1.2 hello there 3
2.4 many words 7
# No delimiter with column positions specified as input
Col1 Col2Col3
1.2hello there 3
2.4many words 7
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width"
_description = "Fixed width"
header_class = FixedWidthHeader
data_class = FixedWidthData
def __init__(
self,
col_starts=None,
col_ends=None,
delimiter_pad=" ",
bookend=True,
header_rows=None,
):
if header_rows is None:
header_rows = ["name"]
super().__init__()
self.data.splitter.delimiter_pad = delimiter_pad
self.data.splitter.bookend = bookend
self.header.col_starts = col_starts
self.header.col_ends = col_ends
self.header.header_rows = header_rows
self.data.header_rows = header_rows
if self.data.start_line is None:
self.data.start_line = len(header_rows)
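# Illustrative sketch (not astropy code; table contents invented): writing
# with extra header rows. With ``header_rows=["name", "unit"]`` the widths
# computed in ``FixedWidthData.write`` above account for both the data and
# every header row.
def _example_write_header_rows():
    import sys

    from astropy.io import ascii
    from astropy.table import Table

    tbl = Table({"wave": [350.0, 950.0], "response": [0.7, 1.2]})
    tbl["wave"].unit = "nm"
    ascii.write(
        tbl, sys.stdout, format="fixed_width", header_rows=["name", "unit"]
    )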
class FixedWidthNoHeaderHeader(FixedWidthHeader):
"""Header reader for fixed with tables with no header line"""
start_line = None
class FixedWidthNoHeaderData(FixedWidthData):
"""Data reader for fixed width tables with no header line"""
start_line = 0
class FixedWidthNoHeader(FixedWidth):
"""Fixed width table which has no header line.
When reading, column names are either input (``names`` keyword) or
auto-generated. Column positions are determined either by input
(``col_starts`` and ``col_ends`` keywords) or by splitting the first data
line. In the latter case a ``delimiter`` is required to split the data
line.
Examples::
# Bar delimiter in header and data
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Compact table having no delimiter and column positions specified as input
1.2hello there3
2.4many words 7
This class is just a convenience wrapper around the ``FixedWidth`` reader
but with ``header_start=None`` and ``data_start=0``.
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width_no_header"
_description = "Fixed width with no header"
header_class = FixedWidthNoHeaderHeader
data_class = FixedWidthNoHeaderData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=" ", bookend=True):
super().__init__(
col_starts,
col_ends,
delimiter_pad=delimiter_pad,
bookend=bookend,
header_rows=[],
)
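# Illustrative sketch (not astropy code; data taken from the docstring
# example above): with no header line and no column positions given, the
# first data line is split on ``delimiter`` to infer the positions.
def _example_no_header_delimiter():
    from astropy.io import ascii

    lines = ["| 1.2 | hello there |  3 |",
             "| 2.4 | many words  |  7 |"]
    return ascii.read(lines, format="fixed_width_no_header", delimiter="|")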
class FixedWidthTwoLineHeader(FixedWidthHeader):
"""Header reader for fixed width tables splitting on whitespace.
For fixed width tables with several header lines, there is typically
a white-space delimited format line, so splitting on white space is
needed.
"""
splitter_class = DefaultSplitter
class FixedWidthTwoLineDataSplitter(FixedWidthSplitter):
"""Splitter for fixed width tables splitting on ``' '``."""
delimiter = " "
class FixedWidthTwoLineData(FixedWidthData):
"""Data reader for fixed with tables with two header lines."""
splitter_class = FixedWidthTwoLineDataSplitter
class FixedWidthTwoLine(FixedWidth):
"""Fixed width table which has two header lines.
The first header line defines the column names and the second implicitly
defines the column positions.
Examples::
# Typical case with column extent defined by ---- under column names.
col1 col2 <== header_start = 0
----- ------------ <== position_line = 1, position_char = "-"
1 bee flies <== data_start = 2
2 fish swims
# Pretty-printed table
+------+------------+
| Col1 | Col2 |
+------+------------+
| 1.2 | "hello" |
| 2.4 | there world|
+------+------------+
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width_two_line"
_description = "Fixed width with second header line"
data_class = FixedWidthTwoLineData
header_class = FixedWidthTwoLineHeader
def __init__(
self,
position_line=None,
position_char="-",
delimiter_pad=None,
bookend=False,
header_rows=None,
):
if len(position_char) != 1:
raise ValueError(
f'Position_char="{position_char}" must be a single character'
)
super().__init__(
delimiter_pad=delimiter_pad, bookend=bookend, header_rows=header_rows
)
if position_line is None:
position_line = len(self.header.header_rows)
self.header.position_line = position_line
self.header.position_char = position_char
self.data.start_line = position_line + 1
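# Illustrative sketch (not astropy code; table contents invented): the
# two-line format writes a ``position_char`` ruler under the header rows;
# ``position_line`` defaults to the line right after them.
def _example_two_line_write():
    import sys

    from astropy.io import ascii
    from astropy.table import Table

    tbl = Table({"col1": [1, 2], "col2": ["bee flies", "fish swims"]})
    ascii.write(tbl, sys.stdout, format="fixed_width_two_line", position_char="=")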
|
ddde45dfd79b58687778bdaea1b8638044c9cd37efc2f67ffd81b2da8dc2db3a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Classes to read AAS MRT table format
Ref: https://journals.aas.org/mrt-standards
:Copyright: Smithsonian Astrophysical Observatory (2021)
:Author: Tom Aldcroft ([email protected]), \
Suyog Garg ([email protected])
"""
import re
import warnings
from io import StringIO
from math import ceil, floor
from string import Template
from textwrap import wrap
import numpy as np
from astropy import units as u
from astropy.table import Column, MaskedColumn, Table
from . import cds, core, fixedwidth
MAX_SIZE_README_LINE = 80
MAX_COL_INTLIMIT = 100000
__doctest_skip__ = ["*"]
BYTE_BY_BYTE_TEMPLATE = [
"Byte-by-byte Description of file: $file",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
"$bytebybyte",
"--------------------------------------------------------------------------------",
]
MRT_TEMPLATE = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"$bytebybyte",
"Notes:",
"--------------------------------------------------------------------------------",
]
class MrtSplitter(fixedwidth.FixedWidthSplitter):
"""
Contains the join function to left align the MRT columns
when writing to a file.
"""
def join(self, vals, widths):
vals = [val + " " * (width - len(val)) for val, width in zip(vals, widths)]
return self.delimiter.join(vals)
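# Quick illustration (not astropy code; values invented): ``join`` left
# aligns each value by right-padding it to the column width. ``MrtData.write``
# further below installs the single-space delimiter before writing.
def _example_mrt_join():
    splitter = MrtSplitter()
    splitter.delimiter = " "
    return splitter.join(["ab", "c"], [4, 3])  # -> "ab   c  "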
class MrtHeader(cds.CdsHeader):
_subfmt = "MRT"
def _split_float_format(self, value):
"""
Splits a Float string into different parts to find the number
of digits after the decimal point and to check whether the value
is in Scientific notation.
Parameters
----------
value : str
String containing the float value to split.
Returns
-------
fmt: (int, int, int, bool, bool)
List of values describing the Float string:
(size, ent, dec, sign, exp)
size, length of the given string.
ent, number of digits before the decimal point.
dec, number of digits after the decimal point.
sign, whether or not the given value is signed.
exp, whether the value is in Scientific notation.
"""
regfloat = re.compile(
r"""(?P<sign> [+-]*)
(?P<ent> [^eE.]+)
(?P<deciPt> [.]*)
(?P<decimals> [0-9]*)
(?P<exp> [eE]*-*)[0-9]*""",
re.VERBOSE,
)
mo = regfloat.match(value)
if mo is None:
raise Exception(f"{value} is not a float number")
return (
len(value),
len(mo.group("ent")),
len(mo.group("decimals")),
mo.group("sign") != "",
mo.group("exp") != "",
)
def _set_column_val_limits(self, col):
"""
Sets the ``col.min`` and ``col.max`` column attributes,
taking into account columns with Null values.
"""
col.max = max(col)
col.min = min(col)
if col.max is np.ma.core.MaskedConstant:
col.max = None
if col.min is np.ma.core.MaskedConstant:
col.min = None
def column_float_formatter(self, col):
"""
String formatter function for a column containing Float values.
Checks if the values in the given column are in Scientific notation,
by splitting the value string. It is assumed that the column has either
plain float values or values in Scientific notation.
A ``col.formatted_width`` attribute is added to the column. It is not added
if such an attribute is already present, say when the ``formats`` argument
is passed to the writer. A properly formatted format string is also added as
the ``col.format`` attribute.
Parameters
----------
col : A ``Table.Column`` object.
"""
# maxsize: maximum length of string containing the float value.
# maxent: maximum number of digits before the decimal point.
# maxdec: maximum number of digits after the decimal point.
# maxprec: maximum precision of the column values, sum of maxent and maxdec.
maxsize, maxprec, maxent, maxdec = 1, 0, 1, 0
sign = False
fformat = "F"
# Find maximum sized value in the col
for val in col.str_vals:
# Skip null values
if val is None or val == "":
continue
# Find format of the Float string
fmt = self._split_float_format(val)
# If value is in Scientific notation
if fmt[4] is True:
# if the previous column value was in normal Float format
# set maxsize, maxprec and maxdec to default.
if fformat == "F":
maxsize, maxprec, maxdec = 1, 0, 0
# Designate the column to be in Scientific notation.
fformat = "E"
else:
# Move to next column value if
# current value is not in Scientific notation
# but the column is designated as such because
# one of the previous values was.
if fformat == "E":
continue
if maxsize < fmt[0]:
maxsize = fmt[0]
if maxent < fmt[1]:
maxent = fmt[1]
if maxdec < fmt[2]:
maxdec = fmt[2]
if fmt[3]:
sign = True
if maxprec < fmt[1] + fmt[2]:
maxprec = fmt[1] + fmt[2]
if fformat == "E":
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = maxsize
if sign:
col.formatted_width += 1
# Number of digits after decimal is replaced by the precision
# for values in Scientific notation, when writing that Format.
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxprec)
col.format = str(col.formatted_width) + "." + str(maxdec) + "e"
else:
lead = ""
if (
getattr(col, "formatted_width", None) is None
): # If ``formats`` not passed.
col.formatted_width = maxent + maxdec + 1
if sign:
col.formatted_width += 1
elif col.format.startswith("0"):
# Keep leading zero, if already set in format - primarily for `seconds` columns
# in coordinates; may need extra case if this is to be also supported with `sign`.
lead = "0"
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxdec)
col.format = lead + col.fortran_format[1:] + "f"
def write_byte_by_byte(self):
"""
Writes the Byte-By-Byte description of the table.
Columns that are `astropy.coordinates.SkyCoord` or `astropy.timeseries.TimeSeries`
objects or columns with values that are such objects are recognized as such,
and predefined labels and descriptions are used for them.
See the Vizier MRT Standard documentation in the link below for more details
on these. An example Byte-By-Byte table is shown here.
See: http://vizier.u-strasbg.fr/doc/catstd-3.1.htx
Example::
--------------------------------------------------------------------------------
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- names Description of names
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
25-31 E7.1 --- s [-9e+34/2.0] Description of s
33-35 I3 --- i [-30/67] Description of i
37-39 F3.1 --- sameF [5.0/5.0] Description of sameF
41-42 I2 --- sameI [20] Description of sameI
44-45 I2 h RAh Right Ascension (hour)
47-48 I2 min RAm Right Ascension (minute)
50-67 F18.15 s RAs Right Ascension (second)
69 A1 --- DE- Sign of Declination
70-71 I2 deg DEd Declination (degree)
73-74 I2 arcmin DEm Declination (arcmin)
76-91 F16.13 arcsec DEs Declination (arcsec)
--------------------------------------------------------------------------------
"""
# Get column widths
vals_list = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max(len(vals[i]) for vals in vals_list)
if self.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
startb = 1 # Byte count starts at 1.
# Set default width of the Bytes count column of the Byte-By-Byte table.
# This ``byte_count_width`` value helps align byte counts with respect
# to the hyphen using a format string.
byte_count_width = len(str(sum(widths) + len(self.cols) - 1))
# Format string for Start Byte and End Byte
singlebfmt = "{:" + str(byte_count_width) + "d}"
fmtb = singlebfmt + "-" + singlebfmt
# Add trailing single whitespaces to Bytes column for better visibility.
singlebfmt += " "
fmtb += " "
# Set default width of Label and Description Byte-By-Byte columns.
max_label_width, max_descrip_size = 7, 16
bbb = Table(
names=["Bytes", "Format", "Units", "Label", "Explanations"], dtype=[str] * 5
)
# Iterate over the columns to write Byte-By-Byte rows.
for i, col in enumerate(self.cols):
# Check if column is MaskedColumn
col.has_null = isinstance(col, MaskedColumn)
if col.format is not None:
col.formatted_width = max(len(sval) for sval in col.str_vals)
# Set MRTColumn type, size and format.
if np.issubdtype(col.dtype, np.integer):
# Integer formatter
self._set_column_val_limits(col)
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = max(len(str(col.max)), len(str(col.min)))
col.fortran_format = "I" + str(col.formatted_width)
if col.format is None:
col.format = ">" + col.fortran_format[1:]
elif np.issubdtype(col.dtype, np.dtype(float).type):
# Float formatter
self._set_column_val_limits(col)
self.column_float_formatter(col)
else:
# String formatter, ``np.issubdtype(col.dtype, str)`` is ``True``.
dtype = col.dtype.str
if col.has_null:
mcol = col
mcol.fill_value = ""
coltmp = Column(mcol.filled(), dtype=str)
dtype = coltmp.dtype.str
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = int(re.search(r"(\d+)$", dtype).group(1))
col.fortran_format = "A" + str(col.formatted_width)
col.format = str(col.formatted_width) + "s"
endb = col.formatted_width + startb - 1
# ``mixin`` columns converted to string valued columns will not have a name
attribute. In those cases, an ``Unknown`` column label is used, indicating that
# such columns can be better formatted with some manipulation before calling
# the MRT writer.
if col.name is None:
col.name = "Unknown"
# Set column description.
if col.description is not None:
description = col.description
else:
description = "Description of " + col.name
# Set null flag in column description
nullflag = ""
if col.has_null:
nullflag = "?"
# Set column unit
if col.unit is not None:
col_unit = col.unit.to_string("cds")
elif col.name.lower().find("magnitude") > -1:
# ``col.unit`` can still be ``None``, if the unit of column values
# is ``Magnitude``, because ``astropy.units.Magnitude`` is actually a class.
# Unlike other units which are instances of ``astropy.units.Unit``,
# application of the ``Magnitude`` unit calculates the logarithm
# of the values. Thus, the only way to check whether the column values
# have a ``Magnitude`` unit is to check the column name.
col_unit = "mag"
else:
col_unit = "---"
# Add col limit values to col description
lim_vals = ""
if (
col.min
and col.max
and not any(
x in col.name for x in ["RA", "DE", "LON", "LAT", "PLN", "PLT"]
)
):
# No col limit values for coordinate columns.
if col.fortran_format[0] == "I":
if (
abs(col.min) < MAX_COL_INTLIMIT
and abs(col.max) < MAX_COL_INTLIMIT
):
if col.min == col.max:
lim_vals = f"[{col.min}]"
else:
lim_vals = f"[{col.min}/{col.max}]"
elif col.fortran_format[0] in ("E", "F"):
lim_vals = (
f"[{floor(col.min * 100) / 100.}/{ceil(col.max * 100) / 100.}]"
)
if lim_vals != "" or nullflag != "":
description = f"{lim_vals}{nullflag} {description}"
# Find the maximum label and description column widths.
if len(col.name) > max_label_width:
max_label_width = len(col.name)
if len(description) > max_descrip_size:
max_descrip_size = len(description)
# Add a row for the Sign of Declination in the bbb table
if col.name == "DEd":
bbb.add_row(
[
singlebfmt.format(startb),
"A1",
"---",
"DE-",
"Sign of Declination",
]
)
col.fortran_format = "I2"
startb += 1
# Add Byte-By-Byte row to bbb table
bbb.add_row(
[
singlebfmt.format(startb)
if startb == endb
else fmtb.format(startb, endb),
"" if col.fortran_format is None else col.fortran_format,
col_unit,
"" if col.name is None else col.name,
description,
]
)
startb = endb + 2
# Properly format bbb columns
bbblines = StringIO()
bbb.write(
bbblines,
format="ascii.fixed_width_no_header",
delimiter=" ",
bookend=False,
delimiter_pad=None,
formats={
"Format": "<6s",
"Units": "<6s",
"Label": "<" + str(max_label_width) + "s",
"Explanations": "" + str(max_descrip_size) + "s",
},
)
# Get formatted bbb lines
bbblines = bbblines.getvalue().splitlines()
# ``nsplit`` is the number of whitespaces to prefix to long description
# lines in order to wrap them. It is the sum of the widths of the
# previous 4 columns plus the number of single spacing between them.
# The hyphen in the Bytes column is also counted.
nsplit = byte_count_width * 2 + 1 + 12 + max_label_width + 4
# Wrap line if it is too long
buff = ""
for newline in bbblines:
if len(newline) > MAX_SIZE_README_LINE:
buff += ("\n").join(
wrap(
newline,
subsequent_indent=" " * nsplit,
width=MAX_SIZE_README_LINE,
)
)
buff += "\n"
else:
buff += newline + "\n"
# Last value of ``endb`` is the sum of column widths after formatting.
self.linewidth = endb
# Remove the last extra newline character from Byte-By-Byte.
buff = buff[:-1]
return buff
def write(self, lines):
"""
Writes the Header of the MRT table, aka ReadMe, which
also contains the Byte-By-Byte description of the table.
"""
from astropy.coordinates import SkyCoord
# Recognised ``SkyCoord.name`` forms with their default column names (helio* require SunPy).
coord_systems = {
"galactic": ("GLAT", "GLON", "b", "l"),
"ecliptic": ("ELAT", "ELON", "lat", "lon"), # 'geocentric*ecliptic'
"heliographic": ("HLAT", "HLON", "lat", "lon"), # '_carrington|stonyhurst'
"helioprojective": ("HPLT", "HPLN", "Ty", "Tx"),
}
eqtnames = ["RAh", "RAm", "RAs", "DEd", "DEm", "DEs"]
# list to store indices of columns that are modified.
to_pop = []
# For columns that are instances of ``SkyCoord`` and other ``mixin`` columns
# or whose values are objects of these classes.
for i, col in enumerate(self.cols):
# If col is a ``Column`` object but its values are ``SkyCoord`` objects,
# convert the whole column to ``SkyCoord`` object, which helps in applying
# SkyCoord methods directly.
if not isinstance(col, SkyCoord) and isinstance(col[0], SkyCoord):
try:
col = SkyCoord(col)
except (ValueError, TypeError):
# If only the first value of the column is a ``SkyCoord`` object,
# the column cannot be converted to a ``SkyCoord`` object.
# These columns are converted to ``Column`` object and then converted
# to string valued column.
if not isinstance(col, Column):
col = Column(col)
col = Column([str(val) for val in col])
self.cols[i] = col
continue
# Replace single ``SkyCoord`` column by its coordinate components if no coordinate
# columns of the corresponding type exist yet.
if isinstance(col, SkyCoord):
# If coordinates are given in RA/DEC, divide them into hour/deg,
# minute/arcminute, second/arcsecond columns.
if (
"ra" in col.representation_component_names.keys()
and len(set(eqtnames) - set(self.colnames)) == 6
):
ra_c, dec_c = col.ra.hms, col.dec.dms
coords = [
ra_c.h.round().astype("i1"),
ra_c.m.round().astype("i1"),
ra_c.s,
dec_c.d.round().astype("i1"),
dec_c.m.round().astype("i1"),
dec_c.s,
]
coord_units = [u.h, u.min, u.second, u.deg, u.arcmin, u.arcsec]
coord_descrip = [
"Right Ascension (hour)",
"Right Ascension (minute)",
"Right Ascension (second)",
"Declination (degree)",
"Declination (arcmin)",
"Declination (arcsec)",
]
for coord, name, coord_unit, descrip in zip(
coords, eqtnames, coord_units, coord_descrip
):
# Have Sign of Declination only in the DEd column.
if name in ["DEm", "DEs"]:
coord_col = Column(
list(np.abs(coord)),
name=name,
unit=coord_unit,
description=descrip,
)
else:
coord_col = Column(
list(coord),
name=name,
unit=coord_unit,
description=descrip,
)
# Set default number of digits after decimal point for the
# second values, and deg-min to (signed) 2-digit zero-padded integer.
if name == "RAs":
coord_col.format = "013.10f"
elif name == "DEs":
coord_col.format = "012.9f"
elif name == "RAh":
coord_col.format = "2d"
elif name == "DEd":
coord_col.format = "+03d"
elif name.startswith(("RA", "DE")):
coord_col.format = "02d"
self.cols.append(coord_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# For all other coordinate types, simply divide into two columns
# for latitude and longitude respectively, with the unit kept as-is.
else:
frminfo = ""
for frame, latlon in coord_systems.items():
if (
frame in col.name
and len(set(latlon[:2]) - set(self.colnames)) == 2
):
if frame != col.name:
frminfo = f" ({col.name})"
lon_col = Column(
getattr(col, latlon[3]),
name=latlon[1],
description=f"{frame.capitalize()} Longitude{frminfo}",
unit=col.representation_component_units[latlon[3]],
format=".12f",
)
lat_col = Column(
getattr(col, latlon[2]),
name=latlon[0],
description=f"{frame.capitalize()} Latitude{frminfo}",
unit=col.representation_component_units[latlon[2]],
format="+.12f",
)
self.cols.append(lon_col)
self.cols.append(lat_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``SkyCoord`` columns that are not in the above three
# representations to string valued columns. Those could either be types not
# supported yet (e.g. 'helioprojective'), or already present and converted.
# If there were any extra ``SkyCoord`` columns of one kind after the first one,
# then their decomposition into their component columns has been skipped.
# This is done in order to not create duplicate component columns.
# Explicit renaming of the extra coordinate component columns by appending some
# suffix to their name, so as to distinguish them, is not yet implemented.
if i not in to_pop:
warnings.warn(
f"Coordinate system of type '{col.name}' already stored in"
" table as CDS/MRT-syle columns or of unrecognized type. So"
f" column {i} is being skipped with designation of a string"
f" valued column `{self.colnames[i]}`.",
UserWarning,
)
self.cols.append(Column(col.to_string(), name=self.colnames[i]))
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``mixin`` columns to ``Column`` objects.
# Parsing these may still lead to errors!
elif not isinstance(col, Column):
col = Column(col)
# If column values are ``object`` types, convert them to string.
if np.issubdtype(col.dtype, np.dtype(object).type):
col = Column([str(val) for val in col])
self.cols[i] = col
# Delete original ``SkyCoord`` columns, if there were any.
for i in to_pop[::-1]:
self.cols.pop(i)
# Check for any left over extra coordinate columns.
if any(x in self.colnames for x in ["RAh", "DEd", "ELON", "GLAT"]):
# At this point any extra ``SkyCoord`` columns should have been converted to string
# valued columns, together with issuance of a warning, by the coordinate parser above.
# This test is just left here as a safeguard.
for i, col in enumerate(self.cols):
if isinstance(col, SkyCoord):
self.cols[i] = Column(col.to_string(), name=self.colnames[i])
message = (
"Table already has coordinate system in CDS/MRT-syle columns. "
f"So column {i} should have been replaced already with "
f"a string valued column `{self.colnames[i]}`."
)
raise core.InconsistentTableError(message)
# Get Byte-By-Byte description and fill the template
bbb_template = Template("\n".join(BYTE_BY_BYTE_TEMPLATE))
byte_by_byte = bbb_template.substitute(
{"file": "table.dat", "bytebybyte": self.write_byte_by_byte()}
)
# Fill up the full ReadMe
rm_template = Template("\n".join(MRT_TEMPLATE))
readme_filled = rm_template.substitute({"bytebybyte": byte_by_byte})
lines.append(readme_filled)
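# Illustrative sketch (not astropy code; coordinates invented): writing a
# table holding a ``SkyCoord`` column. Per the logic above, the coordinate
# column is replaced in the output by the RAh/RAm/RAs and DE-/DEd/DEm/DEs
# component columns.
def _example_mrt_skycoord_write():
    import sys

    from astropy.coordinates import SkyCoord
    from astropy.io import ascii
    from astropy.table import Table

    coord = SkyCoord([10.68, 83.82], [41.27, -5.39], unit="deg")
    tbl = Table({"name": ["M31", "M42"], "coord": coord})
    ascii.write(tbl, sys.stdout, format="mrt")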
class MrtData(cds.CdsData):
"""MRT table data reader"""
_subfmt = "MRT"
splitter_class = MrtSplitter
def write(self, lines):
self.splitter.delimiter = " "
fixedwidth.FixedWidthData.write(self, lines)
class Mrt(core.BaseReader):
"""AAS MRT (Machine-Readable Table) format table.
**Reading**
::
>>> from astropy.io import ascii
>>> table = ascii.read('data.mrt', format='mrt')
**Writing**
Use ``ascii.write(table, 'data.mrt', format='mrt')`` to write tables to
Machine Readable Table (MRT) format.
Note that the metadata of the table, apart from units, column names and
description, will not be written. These have to be filled in by hand later.
See also: :ref:`cds_mrt_format`.
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = "mrt"
_io_registry_format_aliases = ["mrt"]
_io_registry_can_write = True
_description = "MRT format table"
data_class = MrtData
header_class = MrtHeader
def write(self, table=None):
# Writing an empty table is not yet supported.
if len(table) == 0:
raise NotImplementedError
self.data.header = self.header
self.header.position_line = None
self.header.start_line = None
# Create a copy of the ``table``, so that the copy gets modified and
# written to the file while the original table remains unchanged.
table = table.copy()
return super().write(table)
|
a91e3abf645f61a6c8833726cfa26f7430f17fd1353542f8c917298f1eedf2b4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
latex.py:
Classes to read and write LaTeX tables
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
latexdicts = {
"AA": {
"tabletype": "table",
"header_start": r"\hline \hline",
"header_end": r"\hline",
"data_end": r"\hline",
},
"doublelines": {
"tabletype": "table",
"header_start": r"\hline \hline",
"header_end": r"\hline\hline",
"data_end": r"\hline\hline",
},
"template": {
"tabletype": "tabletype",
"caption": "caption",
"tablealign": "tablealign",
"col_align": "col_align",
"preamble": "preamble",
"header_start": "header_start",
"header_end": "header_end",
"data_start": "data_start",
"data_end": "data_end",
"tablefoot": "tablefoot",
"units": {"col1": "unit of col1", "col2": "unit of col2"},
},
}
RE_COMMENT = re.compile(r"(?<!\\)%") # % character but not \%
def add_dictval_to_list(adict, key, alist):
"""
Add a value from a dictionary to a list
Parameters
----------
adict : dictionary
key : hashable
alist : list
List where value should be added
"""
if key in adict:
if isinstance(adict[key], str):
alist.append(adict[key])
else:
alist.extend(adict[key])
def find_latex_line(lines, latex):
"""
Find the first line which matches a pattern
Parameters
----------
lines : list
List of strings
latex : str
Search pattern
Returns
-------
line_num : int, None
Line number. Returns None if no match was found.
"""
re_string = re.compile(latex.replace("\\", "\\\\"))
for i, line in enumerate(lines):
if re_string.match(line):
return i
else:
return None
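# Quick illustration (not astropy code; lines invented): the pattern's
# backslashes are escaped before compiling, so a raw LaTeX command can be
# passed in directly.
def _example_find_latex_line():
    lines = [r"\begin{table}", r"\begin{tabular}{cc}", r"a & b \\"]
    return find_latex_line(lines, r"\begin{tabular}")  # -> 1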
class LatexInputter(core.BaseInputter):
def process_lines(self, lines):
return [lin.strip() for lin in lines]
class LatexSplitter(core.BaseSplitter):
"""Split LaTeX table data. Default delimiter is `&`."""
delimiter = "&"
def __call__(self, lines):
last_line = RE_COMMENT.split(lines[-1])[0].strip()
if not last_line.endswith(r"\\"):
lines[-1] = last_line + r"\\"
return super().__call__(lines)
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. Also remove
\\ at end of line"""
line = RE_COMMENT.split(line)[0]
line = line.strip()
if line.endswith(r"\\"):
line = line.rstrip(r"\\")
else:
raise core.InconsistentTableError(
r"Lines in LaTeX table have to end with \\"
)
return line
def process_val(self, val):
"""Remove whitespace and {} at the beginning or end of value."""
val = val.strip()
if val and (val[0] == "{") and (val[-1] == "}"):
val = val[1:-1]
return val
def join(self, vals):
"""Join values together and add a few extra spaces for readability"""
delimiter = " " + self.delimiter + " "
return delimiter.join(x.strip() for x in vals) + r" \\"
class LatexHeader(core.BaseHeader):
"""Class to read the header of Latex Tables"""
header_start = r"\begin{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
line = find_latex_line(lines, self.header_start)
if line is not None:
return line + 1
else:
return None
def _get_units(self):
units = {}
col_units = [col.info.unit for col in self.cols]
for name, unit in zip(self.colnames, col_units):
if unit:
try:
units[name] = unit.to_string(format="latex_inline")
except AttributeError:
units[name] = unit
return units
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
if self.latex["tabletype"] is not None:
lines.append(r"\begin{" + self.latex["tabletype"] + r"}" + align)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\caption{" + self.latex["caption"] + "}")
lines.append(self.header_start + r"{" + self.latex["col_align"] + r"}")
add_dictval_to_list(self.latex, "header_start", lines)
lines.append(self.splitter.join(self.colnames))
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
lines.append(
self.splitter.join([units.get(name, " ") for name in self.colnames])
)
add_dictval_to_list(self.latex, "header_end", lines)
class LatexData(core.BaseData):
"""Class to read the data in LaTeX tables"""
data_start = None
data_end = r"\end{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
if self.data_start:
return find_latex_line(lines, self.data_start)
else:
start = self.header.start_line(lines)
if start is None:
raise core.InconsistentTableError(r"Could not find table start")
return start + 1
def end_line(self, lines):
if self.data_end:
return find_latex_line(lines, self.data_end)
else:
return None
def write(self, lines):
add_dictval_to_list(self.latex, "data_start", lines)
core.BaseData.write(self, lines)
add_dictval_to_list(self.latex, "data_end", lines)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
if self.latex["tabletype"] is not None:
lines.append(r"\end{" + self.latex["tabletype"] + "}")
class Latex(core.BaseReader):
r"""LaTeX format table.
This class implements some LaTeX specific commands. Its main
purpose is to write out a table in a form that LaTeX can compile. It
is beyond the scope of this class to implement every possible LaTeX
command; instead the focus is on generating syntactically valid
LaTeX tables.
This class can also read simple LaTeX tables (one line per table
row, no ``\multicolumn`` or similar constructs), specifically, it
can read the tables that it writes.
Reading a LaTeX table, the following keywords are accepted:
**ignore_latex_commands** :
Lines starting with these LaTeX commands will be treated as comments (i.e. ignored).
When writing a LaTeX table, some keywords can customize the
format. Care has to be taken here, because python interprets ``\\``
in a string as an escape character. In order to pass this to the
output either format your strings as raw strings with the ``r``
specifier or use a double ``\\\\``.
Examples::
caption = r'My table \label{mytable}'
caption = 'My table \\\\label{mytable}'
**latexdict** : Dictionary of extra parameters for the LaTeX output
* tabletype : used for first and last line of table.
The default is ``\\begin{table}``. The following would generate a table,
which spans the whole page in a two-column document::
ascii.write(data, sys.stdout, Writer = ascii.Latex,
latexdict = {'tabletype': 'table*'})
If ``None``, the table environment will be dropped, keeping only
the ``tabular`` environment.
* tablealign : positioning of table in text.
The default is not to specify a position preference in the text.
If, e.g. the alignment is ``ht``, then the LaTeX will be ``\\begin{table}[ht]``.
* col_align : Alignment of columns
If not present all columns will be centered.
* caption : Table caption (string or list of strings)
This will appear above the table as it is the standard in
many scientific publications. If you prefer a caption below
the table, just write the full LaTeX command as
``latexdict['tablefoot'] = r'\caption{My table}'``
* preamble, header_start, header_end, data_start, data_end, tablefoot: Pure LaTeX
Each one can be a string or a list of strings. These strings
will be inserted into the table without any further
processing. See the examples below.
* units : dictionary of strings
Keys in this dictionary should be names of columns. If
present, a line in the LaTeX table directly below the column
names is added, which contains the values of the
dictionary. Example::
from astropy.io import ascii
data = {'name': ['bike', 'car'], 'mass': [75,1200], 'speed': [10, 130]}
ascii.write(data, Writer=ascii.Latex,
latexdict = {'units': {'mass': 'kg', 'speed': 'km/h'}})
If the column has no entry in the ``units`` dictionary, it defaults
to the **unit** attribute of the column. If this attribute is not
specified (i.e. it is None), the unit will be written as ``' '``.
Run the following code to see where each element of the
dictionary is inserted in the LaTeX table::
from astropy.io import ascii
data = {'cola': [1,2], 'colb': [3,4]}
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['template'])
Some table styles are predefined in the dictionary
``ascii.latex.latexdicts``. The following generates a table in the
style preferred by A&A and some other journals::
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['AA'])
As an example, this generates a table, which spans all columns
and is centered on the page::
ascii.write(data, Writer=ascii.Latex, col_align='|lr|',
latexdict={'preamble': r'\begin{center}',
'tablefoot': r'\end{center}',
'tabletype': 'table*'})
**caption** : Set table caption
Shorthand for::
latexdict['caption'] = caption
**col_align** : Set the column alignment.
If not present this will be auto-generated for centered
columns. Shorthand for::
latexdict['col_align'] = col_align
"""
_format_name = "latex"
_io_registry_format_aliases = ["latex"]
_io_registry_suffix = ".tex"
_description = "LaTeX table"
header_class = LatexHeader
data_class = LatexData
inputter_class = LatexInputter
# Strictly speaking latex only supports 1-d columns so this should inherit
# the base max_ndim = 1. But as reported in #11695 this causes a strange
# problem with Jupyter notebook, which displays a table by first calling
# _repr_latex_. For a multidimensional table this issues a stack traceback
# before moving on to _repr_html_. Here we prioritize fixing the issue with
# Jupyter displaying a Table with multidimensional columns.
max_ndim = None
def __init__(
self,
ignore_latex_commands=[
"hline",
"vspace",
"tableline",
"toprule",
"midrule",
"bottomrule",
],
latexdict={},
caption="",
col_align=None,
):
super().__init__()
self.latex = {}
# The latex dict drives the format of the table and needs to be shared
# with data and header
self.header.latex = self.latex
self.data.latex = self.latex
self.latex["tabletype"] = "table"
self.latex.update(latexdict)
if caption:
self.latex["caption"] = caption
if col_align:
self.latex["col_align"] = col_align
self.ignore_latex_commands = ignore_latex_commands
self.header.comment = "%|" + "|".join(
[r"\\" + command for command in self.ignore_latex_commands]
)
self.data.comment = self.header.comment
def write(self, table=None):
self.header.start_line = None
self.data.start_line = None
return core.BaseReader.write(self, table=table)
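# Illustrative sketch (not astropy code; table contents invented): round
# tripping a simple table through the LaTeX writer and reader using one of
# the predefined styles. As noted in the docstring, the reader only handles
# one table row per line.
def _example_latex_roundtrip():
    import io

    from astropy.io import ascii
    from astropy.table import Table

    tbl = Table({"cola": [1, 2], "colb": [3, 4]})
    buf = io.StringIO()
    ascii.write(tbl, buf, format="latex", latexdict=ascii.latex.latexdicts["AA"])
    return ascii.read(buf.getvalue(), format="latex")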
class AASTexHeaderSplitter(LatexSplitter):
r"""Extract column names from a `deluxetable`_.
This splitter expects the following LaTeX code **in a single line**:
\tablehead{\colhead{col1} & ... & \colhead{coln}}
"""
def __call__(self, lines):
return super(LatexSplitter, self).__call__(lines)
def process_line(self, line):
"""extract column names from tablehead"""
line = line.split("%")[0]
line = line.replace(r"\tablehead", "")
line = line.strip()
if (line[0] == "{") and (line[-1] == "}"):
line = line[1:-1]
else:
raise core.InconsistentTableError(r"\tablehead is missing {}")
return line.replace(r"\colhead", "")
def join(self, vals):
return " & ".join([r"\colhead{" + str(x) + "}" for x in vals])
class AASTexHeader(LatexHeader):
r"""In a `deluxetable
<http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header
keywords differ from standard LaTeX.
This header is modified to take that into account.
"""
header_start = r"\tablehead"
splitter_class = AASTexHeaderSplitter
def start_line(self, lines):
return find_latex_line(lines, r"\tablehead")
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
lines.append(
r"\begin{"
+ self.latex["tabletype"]
+ r"}{"
+ self.latex["col_align"]
+ r"}"
+ align
)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\tablecaption{" + self.latex["caption"] + "}")
tablehead = " & ".join([r"\colhead{" + name + "}" for name in self.colnames])
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
tablehead += r"\\ " + self.splitter.join(
[units.get(name, " ") for name in self.colnames]
)
lines.append(r"\tablehead{" + tablehead + "}")
class AASTexData(LatexData):
r"""In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata`"""
data_start = r"\startdata"
data_end = r"\enddata"
def start_line(self, lines):
return find_latex_line(lines, self.data_start) + 1
def write(self, lines):
lines.append(self.data_start)
lines_length_initial = len(lines)
core.BaseData.write(self, lines)
# Remove the extra space(s) and the trailing \\ appended by the writer,
# which would otherwise create an extra blank line at the end.
if len(lines) > lines_length_initial:
lines[-1] = re.sub(r"\s* \\ \\ \s* $", "", lines[-1], flags=re.VERBOSE)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
lines.append(r"\end{" + self.latex["tabletype"] + r"}")
class AASTex(Latex):
"""AASTeX format table.
This class implements some AASTeX specific commands.
AASTeX is used for the AAS (American Astronomical Society)
publications like ApJ, ApJL and AJ.
It derives from the ``Latex`` reader and accepts the same
keywords. However, the keywords ``header_start``, ``header_end``,
``data_start`` and ``data_end`` in ``latexdict`` have no effect.
"""
_format_name = "aastex"
_io_registry_format_aliases = ["aastex"]
_io_registry_suffix = "" # AASTex inherits from Latex, so override this class attr
_description = "AASTeX deluxetable used for AAS journals"
header_class = AASTexHeader
data_class = AASTexData
def __init__(self, **kwargs):
super().__init__(**kwargs)
# check if tabletype was explicitly set by the user
if not (("latexdict" in kwargs) and ("tabletype" in kwargs["latexdict"])):
self.latex["tabletype"] = "deluxetable"
|
b40276d51d0676049837261c45129b0ee32b885eb3c3f02d1a2855a0393f904e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ipac.py:
Classes to read IPAC table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from collections import OrderedDict, defaultdict
from textwrap import wrap
from warnings import warn
from astropy.table.pprint import get_auto_format_func
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core, fixedwidth
class IpacFormatErrorDBMS(Exception):
def __str__(self):
return "{}\nSee {}".format(
super().__str__(),
"https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html",
)
class IpacFormatError(Exception):
def __str__(self):
return "{}\nSee {}".format(
super().__str__(),
"https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html",
)
class IpacHeaderSplitter(core.BaseSplitter):
"""Splitter for Ipac Headers.
This splitter is similar to its parent when reading, but supports a
fixed width format (as required for Ipac table headers) for writing.
"""
process_line = None
process_val = None
delimiter = "|"
delimiter_pad = ""
skipinitialspace = False
comment = r"\s*\\"
write_comment = r"\\"
col_starts = None
col_ends = None
def join(self, vals, widths):
pad = self.delimiter_pad or ""
delimiter = self.delimiter or ""
padded_delim = pad + delimiter + pad
bookend_left = delimiter + pad
bookend_right = pad + delimiter
vals = [" " * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
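# Quick illustration (not astropy code; values invented): the header
# splitter right aligns each value and bookends the line with the
# delimiter.
def _example_ipac_header_join():
    return IpacHeaderSplitter().join(["ra", "dec"], [6, 6])
    # -> "|    ra|   dec|"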
class IpacHeader(fixedwidth.FixedWidthHeader):
"""IPAC table header"""
splitter_class = IpacHeaderSplitter
# Defined ordered list of possible types. Ordering is needed to
# distinguish between "d" (double) and "da" (date) as defined by
# the IPAC standard for abbreviations. This gets used in get_col_type().
col_type_list = (
("integer", core.IntType),
("long", core.IntType),
("double", core.FloatType),
("float", core.FloatType),
("real", core.FloatType),
("char", core.StrType),
("date", core.StrType),
)
definition = "ignore"
start_line = None
def process_lines(self, lines):
"""Generator to yield IPAC header lines, i.e. those starting and ending with
delimiter character (with trailing whitespace stripped)"""
delim = self.splitter.delimiter
for line in lines:
line = line.rstrip()
if line.startswith(delim) and line.endswith(delim):
yield line.strip(delim)
def update_meta(self, lines, meta):
"""
Extract table-level comments and keywords for IPAC table. See:
https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html#kw
"""
def process_keyword_value(val):
"""
Take a string value and convert to float, int or str, and strip quotes
as needed.
"""
val = val.strip()
try:
val = int(val)
except Exception:
try:
val = float(val)
except Exception:
# Strip leading/trailing quote. The spec says that a matched pair
# of quotes is required, but this code will allow a non-quoted value.
for quote in ('"', "'"):
if val.startswith(quote) and val.endswith(quote):
val = val[1:-1]
break
return val
table_meta = meta["table"]
table_meta["comments"] = []
table_meta["keywords"] = OrderedDict()
keywords = table_meta["keywords"]
# fmt: off
re_keyword = re.compile(
r'\\'
r'(?P<name> \w+)'
r'\s* = (?P<value> .+) $',
re.VERBOSE
)
# fmt: on
for line in lines:
# Keywords and comments start with "\". Once the first non-slash
# line is seen then bail out.
if not line.startswith("\\"):
break
m = re_keyword.match(line)
if m:
name = m.group("name")
val = process_keyword_value(m.group("value"))
# IPAC allows for continuation keywords, e.g.
# \SQL = 'WHERE '
# \SQL = 'SELECT (25 column names follow in next row.)'
if name in keywords and isinstance(val, str):
prev_val = keywords[name]["value"]
if isinstance(prev_val, str):
val = prev_val + val
keywords[name] = {"value": val}
else:
# Comment is required to start with "\ "
if line.startswith("\\ "):
val = line[2:].strip()
if val:
table_meta["comments"].append(val)
def get_col_type(self, col):
for col_type_key, col_type in self.col_type_list:
if col_type_key.startswith(col.raw_type.lower()):
return col_type
else:
raise ValueError(
f'Unknown data type "{col.raw_type}" for column "{col.name}"'
)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
# generator returning valid header lines
header_lines = self.process_lines(lines)
header_vals = [vals for vals in self.splitter(header_lines)]
if len(header_vals) == 0:
raise ValueError(
"At least one header line beginning and ending with delimiter required"
)
elif len(header_vals) > 4:
raise ValueError("More than four header lines were found")
# Generate column definitions
cols = []
start = 1
for i, name in enumerate(header_vals[0]):
col = core.Column(name=name.strip(" -"))
col.start = start
col.end = start + len(name)
if len(header_vals) > 1:
col.raw_type = header_vals[1][i].strip(" -")
col.type = self.get_col_type(col)
if len(header_vals) > 2:
col.unit = header_vals[2][i].strip() or None # Can't strip dashes here
if len(header_vals) > 3:
# The IPAC null value corresponds to the io.ascii bad_value.
# In this case there isn't a fill_value defined, so just put
# in the minimal entry that is sure to convert properly to the
# required type.
#
# Strip spaces but not dashes (not allowed in NULL row per
# https://github.com/astropy/astropy/issues/361)
null = header_vals[3][i].strip()
fillval = "" if issubclass(col.type, core.StrType) else "0"
self.data.fill_values.append((null, fillval, col.name))
start = col.end + 1
cols.append(col)
# Correct column start/end based on definition
if self.ipac_definition == "right":
col.start -= 1
elif self.ipac_definition == "left":
col.end += 1
self.names = [x.name for x in cols]
self.cols = cols
def str_vals(self):
if self.DBMS:
IpacFormatE = IpacFormatErrorDBMS
else:
IpacFormatE = IpacFormatError
namelist = self.colnames
if self.DBMS:
countnamelist = defaultdict(int)
for name in self.colnames:
countnamelist[name.lower()] += 1
doublenames = [x for x in countnamelist if countnamelist[x] > 1]
if doublenames != []:
raise IpacFormatE(
"IPAC DBMS tables are not case sensitive. "
f"This causes duplicate column names: {doublenames}"
)
for name in namelist:
m = re.match(r"\w+", name)
if m.end() != len(name):
raise IpacFormatE(
f"{name} - Only alphanumeric characters and _ "
"are allowed in column names."
)
if self.DBMS and not (name[0].isalpha() or (name[0] == "_")):
raise IpacFormatE(f"Column name cannot start with numbers: {name}")
if self.DBMS:
if name in ["x", "y", "z", "X", "Y", "Z"]:
raise IpacFormatE(
f"{name} - x, y, z, X, Y, Z are reserved names and "
"cannot be used as column names."
)
if len(name) > 16:
raise IpacFormatE(
f"{name} - Maximum length for column name is 16 characters"
)
else:
if len(name) > 40:
raise IpacFormatE(
f"{name} - Maximum length for column name is 40 characters."
)
dtypelist = []
unitlist = []
nullist = []
for col in self.cols:
col_dtype = col.info.dtype
col_unit = col.info.unit
col_format = col.info.format
if col_dtype.kind in ["i", "u"]:
if col_dtype.itemsize <= 2:
dtypelist.append("int")
else:
dtypelist.append("long")
elif col_dtype.kind == "f":
if col_dtype.itemsize <= 4:
dtypelist.append("float")
else:
dtypelist.append("double")
else:
dtypelist.append("char")
if col_unit is None:
unitlist.append("")
else:
unitlist.append(str(col.info.unit))
# This may be incompatible with mixin columns
null = col.fill_values[core.masked]
try:
auto_format_func = get_auto_format_func(col)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
nullist.append((format_func(col_format, null)).strip())
except Exception:
# It is possible that null and the column values have different
# data types (e.g. a number column with null = 'null', i.e. a string).
# This could cause all kinds of exceptions, so a catch all
# block is needed here
nullist.append(str(null).strip())
return [namelist, dtypelist, unitlist, nullist]
def write(self, lines, widths):
"""Write header.
The width of each column is determined in Ipac.write. Writing the header
must be delayed until that time.
This function is called from there, once the width information is
available."""
for vals in self.str_vals():
lines.append(self.splitter.join(vals, widths))
return lines
class IpacDataSplitter(fixedwidth.FixedWidthSplitter):
delimiter = " "
delimiter_pad = ""
bookend = True
class IpacData(fixedwidth.FixedWidthData):
"""IPAC table data reader"""
comment = r"[|\\]"
start_line = 0
splitter_class = IpacDataSplitter
fill_values = [(core.masked, "null")]
def write(self, lines, widths, vals_list):
"""IPAC writer, modified from FixedWidth writer"""
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class Ipac(basic.Basic):
r"""IPAC format table.
See: https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html
Example::
\\name=value
\\ Comment
| column1 | column2 | column3 | column4 | column5 |
| double | double | int | double | char |
| unit | unit | unit | unit | unit |
| null | null | null | null | null |
2.0978 29.09056 73765 2.06000 B8IVpMnHg
Or::
|-----ra---|----dec---|---sao---|------v---|----sptype--------|
2.09708 29.09056 73765 2.06000 B8IVpMnHg
The comments and keywords defined in the header are available via the output
table ``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/ipac.dat')
>>> data = ascii.read(filename)
>>> print(data.meta['comments'])
['This is an example of a valid comment']
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'])
...
intval 1
floatval 2300.0
date Wed Sp 20 09:48:36 1995
key_continue IPAC keywords can continue across lines
Note that there are different conventions for characters occurring below the
position of the ``|`` symbol in IPAC tables. By default, any character
below a ``|`` will be ignored (since this is the current standard),
but if you need to read files that assume characters below the ``|``
symbols belong to the column before or after the ``|``, you can specify
``definition='left'`` or ``definition='right'`` respectively when reading
the table (the default is ``definition='ignore'``). The following examples
demonstrate the different conventions:
* ``definition='ignore'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='left'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='right'``::
| ra | dec |
| float | float |
1.2345 6.7890
IPAC tables can specify a null value in the header that is shown in place
of missing or bad data. On writing, this value defaults to ``null``.
To specify a different null value, use the ``fill_values`` option to
replace masked values with a string or number of your choice as
described in :ref:`astropy:io_ascii_write_parameters`::
>>> from astropy.io.ascii import masked
>>> fill = [(masked, 'N/A', 'ra'), (masked, -999, 'sptype')]
>>> ascii.write(data, format='ipac', fill_values=fill)
\ This is an example of a valid comment
...
| ra| dec| sai| v2| sptype|
| double| double| long| double| char|
| unit| unit| unit| unit| ergs|
| N/A| null| null| null| -999|
N/A 29.09056 null 2.06 -999
2345678901.0 3456789012.0 456789012 4567890123.0 567890123456789012
When writing a table with a column of integers, the data type is output
as ``int`` when the column ``dtype.itemsize`` is less than or equal to 2;
otherwise the data type is ``long``. For a column of floating-point values,
the data type is ``float`` when ``dtype.itemsize`` is less than or equal
to 4; otherwise the data type is ``double``.
Parameters
----------
definition : str, optional
Specify the convention for characters in the data table that occur
directly below the pipe (``|``) symbol in the header column definition:
* 'ignore' - Any character beneath a pipe symbol is ignored (default)
* 'right' - Character is associated with the column to the right
* 'left' - Character is associated with the column to the left
DBMS : bool, optional
If true, this verifies that written tables adhere (semantically)
to the `IPAC/DBMS
<https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html>`_
definition of IPAC tables. If 'False' it only checks for the (less strict)
`IPAC <https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html>`_
definition.
"""
_format_name = "ipac"
_io_registry_format_aliases = ["ipac"]
_io_registry_can_write = True
_description = "IPAC format table"
data_class = IpacData
header_class = IpacHeader
def __init__(self, definition="ignore", DBMS=False):
super().__init__()
# Usually the header is not defined in __init__, but here it needs a keyword
if definition in ["ignore", "left", "right"]:
self.header.ipac_definition = definition
else:
raise ValueError("definition should be one of ignore/left/right")
self.header.DBMS = DBMS
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Set a default null value for all columns by adding at the end, which
# is the position with the lowest priority.
# We have to do it this late, because the fill_value
# defined in the class can be overwritten by ui.write
self.data.fill_values.append((core.masked, "null"))
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, self.guessing)
core._apply_include_exclude_names(
table, self.names, self.include_names, self.exclude_names
)
# Check that table has only 1-d columns.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
# Write header and data to lines list
lines = []
# Write meta information
if "comments" in table.meta:
for comment in table.meta["comments"]:
if len(str(comment)) > 78:
warn(
"Comment string > 78 characters was automatically wrapped.",
AstropyUserWarning,
)
for line in wrap(
str(comment), 80, initial_indent="\\ ", subsequent_indent="\\ "
):
lines.append(line)
if "keywords" in table.meta:
keydict = table.meta["keywords"]
for keyword in keydict:
try:
val = keydict[keyword]["value"]
lines.append(f"\\{keyword.strip()}={val!r}")
# meta is not standardized: Catch some common Errors.
except TypeError:
warn(
f"Table metadata keyword {keyword} has been skipped. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}",
AstropyUserWarning,
)
ignored_keys = [
key for key in table.meta if key not in ("keywords", "comments")
]
if any(ignored_keys):
warn(
f"Table metadata keyword(s) {ignored_keys} were not written. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}",
AstropyUserWarning,
)
# Usually, this is done in data.write, but since the header is written
# first, we need that here.
self.data._set_fill_values(self.data.cols)
# get header and data as strings to find width of each column
for i, col in enumerate(table.columns.values()):
col.headwidth = max(len(vals[i]) for vals in self.header.str_vals())
# keep data_str_vals because they take some time to make
data_str_vals = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
data_str_vals.append(vals)
for i, col in enumerate(table.columns.values()):
# FIXME: In Python 3.4, use max([], default=0).
# See: https://docs.python.org/3/library/functions.html#max
if data_str_vals:
col.width = max(len(vals[i]) for vals in data_str_vals)
else:
col.width = 0
widths = [max(col.width, col.headwidth) for col in table.columns.values()]
# then write table
self.header.write(lines, widths)
self.data.write(lines, widths, data_str_vals)
return lines
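# Illustrative sketch (not astropy code; all values invented): writing an
# IPAC table with table-level metadata. Comments and keywords must live
# under the ``comments`` and ``keywords`` keys of ``table.meta``, in the
# form checked above.
def _example_ipac_write():
    import sys

    from astropy.io import ascii
    from astropy.table import Table

    tbl = Table({"ra": [2.09708], "dec": [29.09056]})
    tbl.meta["comments"] = ["An example table"]
    tbl.meta["keywords"] = {"origin": {"value": "illustration"}}
    ascii.write(tbl, sys.stdout, format="ipac")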
|
8b2334d83733c4b5009eae70678c0b1e993107310ab5f53e0385b1b4307fa766 | # Licensed under a 3-clause BSD style license
import os
import numpy
from setuptools import Extension
ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
sources = [
os.path.join(ROOT, "cparser.pyx"),
os.path.join(ROOT, "src", "tokenizer.c"),
]
ascii_ext = Extension(
name="astropy.io.ascii.cparser",
include_dirs=[numpy.get_include()],
sources=sources,
)
return [ascii_ext]
|
11bfb79f381822f685f6d0fe79bb399ac09a460fa0cc706925fc1835221fa8d8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible HTML table reader and writer.
html.py:
Classes to read and write HTML tables
`BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
must be installed to read HTML tables.
"""
import warnings
from copy import deepcopy
from astropy.table import Column
from astropy.utils.xml import writer
from . import core
class SoupString(str):
"""
Allows for strings to hold BeautifulSoup data.
"""
def __new__(cls, *args, **kwargs):
return str.__new__(cls, *args, **kwargs)
def __init__(self, val):
self.soup = val
class ListWriter:
"""
Allows for XMLWriter to write to a list instead of a file.
"""
def __init__(self, out):
self.out = out
def write(self, data):
self.out.append(data)
def identify_table(soup, htmldict, numtable):
"""
Checks whether the given BeautifulSoup tag is the table
the user intends to process.
"""
if soup is None or soup.name != "table":
return False # Tag is not a <table>
elif "table_id" not in htmldict:
return numtable == 1
table_id = htmldict["table_id"]
if isinstance(table_id, str):
return "id" in soup.attrs and soup["id"] == table_id
elif isinstance(table_id, int):
return table_id == numtable
# Return False if an invalid parameter is given
return False
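# Illustrative sketch (not astropy code; markup invented, requires
# BeautifulSoup): selecting a table by position or by its HTML ``id``.
# The ``htmldict`` entries map onto the checks in ``identify_table`` above.
def _example_select_html_table():
    from astropy.io import ascii

    html = """
    <table id="first"><tr><th>a</th></tr><tr><td>1</td></tr></table>
    <table id="second"><tr><th>b</th></tr><tr><td>2</td></tr></table>
    """
    by_number = ascii.read(html, format="html", htmldict={"table_id": 2})
    by_id = ascii.read(html, format="html", htmldict={"table_id": "second"})
    return by_number, by_id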
class HTMLInputter(core.BaseInputter):
"""
Input lines of HTML in a valid form.
This requires `BeautifulSoup
<http://www.crummy.com/software/BeautifulSoup/>`_ to be installed.
"""
def process_lines(self, lines):
"""
Convert the given input into a list of SoupString rows
for further processing.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise core.OptionalTableImportError(
"BeautifulSoup must be installed to read HTML tables"
)
if "parser" not in self.html:
with warnings.catch_warnings():
# Ignore bs4 parser warning #4550.
warnings.filterwarnings(
"ignore", ".*no parser was explicitly specified.*"
)
soup = BeautifulSoup("\n".join(lines))
else: # use a custom backend parser
soup = BeautifulSoup("\n".join(lines), self.html["parser"])
tables = soup.find_all("table")
for i, possible_table in enumerate(tables):
if identify_table(possible_table, self.html, i + 1):
table = possible_table # Find the correct table
break
else:
if isinstance(self.html["table_id"], int):
err_descr = f"number {self.html['table_id']}"
else:
err_descr = f"id '{self.html['table_id']}'"
raise core.InconsistentTableError(
f"ERROR: HTML table {err_descr} not found"
)
# Get all table rows
soup_list = [SoupString(x) for x in table.find_all("tr")]
return soup_list
class HTMLSplitter(core.BaseSplitter):
"""
Split HTML table data.
"""
def __call__(self, lines):
"""
Return HTML data from lines as a generator.
"""
for line in lines:
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
header_elements = soup.find_all("th")
if header_elements:
# Return multicolumns as tuples for HTMLHeader handling
yield [
(el.text.strip(), el["colspan"])
if el.has_attr("colspan")
else el.text.strip()
for el in header_elements
]
data_elements = soup.find_all("td")
if data_elements:
yield [el.text.strip() for el in data_elements]
if len(lines) == 0:
raise core.InconsistentTableError(
"HTML tables must contain data in a <table> tag"
)
class HTMLOutputter(core.TableOutputter):
"""
Output the HTML data as an ``astropy.table.Table`` object.
This subclass allows for the final table to contain
multidimensional columns (defined using the colspan attribute
of <th>).
"""
default_converters = [
core.convert_numpy(int),
core.convert_numpy(float),
core.convert_numpy(str),
]
def __call__(self, cols, meta):
"""
Process the data in multidimensional columns.
"""
new_cols = []
col_num = 0
while col_num < len(cols):
col = cols[col_num]
if hasattr(col, "colspan"):
# Join elements of spanned columns together into list of tuples
span_cols = cols[col_num : col_num + col.colspan]
new_col = core.Column(col.name)
new_col.str_vals = list(zip(*[x.str_vals for x in span_cols]))
new_cols.append(new_col)
col_num += col.colspan
else:
new_cols.append(col)
col_num += 1
return super().__call__(new_cols, meta)
class HTMLHeader(core.BaseHeader):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which header data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
if soup.th is not None:
return i
return None
def _set_cols_from_names(self):
"""
Set columns from header names, handling multicolumns appropriately.
"""
self.cols = []
new_names = []
for name in self.names:
if isinstance(name, tuple):
col = core.Column(name=name[0])
col.colspan = int(name[1])
self.cols.append(col)
new_names.append(name[0])
for i in range(1, int(name[1])):
# Add dummy columns
self.cols.append(core.Column(""))
new_names.append("")
else:
self.cols.append(core.Column(name=name))
new_names.append(name)
self.names = new_names
class HTMLData(core.BaseData):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which table data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
if soup.td is not None:
if soup.th is not None:
raise core.InconsistentTableError(
"HTML tables cannot have headings and data in the same row"
)
return i
raise core.InconsistentTableError("No start line found for HTML data")
def end_line(self, lines):
"""
Return the line number at which table data ends.
"""
last_index = -1
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
if soup.td is not None:
last_index = i
if last_index == -1:
return None
return last_index + 1
class HTML(core.BaseReader):
"""HTML format table.
In order to customize input and output, a dict of parameters may
be passed to this class holding specific customizations.
**htmldict** : Dictionary of parameters for HTML input/output.
* css : Customized styling
If present, this parameter will be included in a <style>
tag and will define stylistic attributes of the output.
* table_id : ID for the input table
If a string, this defines the HTML id of the table to be processed.
If an integer, this specifies the index of the input table in the
available tables. Unless this parameter is given, the reader will
use the first table found in the input file.
* multicol : Use multi-dimensional columns for output
The writer will output tuples as elements of multi-dimensional
columns if this parameter is true, and if not then it will
use the syntax 1.36583e-13 .. 1.36583e-13 for output. If not
present, this parameter will be true by default.
* raw_html_cols : column name or list of names with raw HTML content
This allows one to include raw HTML content in the column output,
for instance to include link references in a table. This option
requires that the bleach package be installed. Only whitelisted
tags are allowed through for security reasons (see the
raw_html_clean_kwargs arg).
* raw_html_clean_kwargs : dict of keyword args controlling HTML cleaning
Raw HTML will be cleaned to prevent unsafe HTML from ending up in
the table output. This is done by calling ``bleach.clean(data,
**raw_html_clean_kwargs)``. For details on the available options
(e.g. tag whitelist) see:
https://bleach.readthedocs.io/en/latest/clean.html
* parser : Specific HTML parsing library to use
If specified, this specifies which HTML parsing library
BeautifulSoup should use as a backend. The options to choose
from are 'html.parser' (the standard library parser), 'lxml'
(the recommended parser), 'xml' (lxml's XML parser), and
'html5lib'. html5lib is a highly lenient parser and therefore
might work correctly for unusual input if a different parser
fails.
* jsfiles : list of js files to include when writing table.
* cssfiles : list of css files to include when writing table.
* js : js script to include in the body when writing table.
* table_class : css class for the table
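    Example (a minimal usage sketch; the file names and CSS are illustrative,
    not fixed by this class)::
        from astropy.io import ascii
        dat = ascii.read('data.html', format='html', htmldict={'table_id': 2})
        ascii.write(dat, 'out.html', format='html',
                    htmldict={'css': 'table, th, td {border: 1px solid black;}'})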
"""
_format_name = "html"
_io_registry_format_aliases = ["html"]
_io_registry_suffix = ".html"
_description = "HTML table"
header_class = HTMLHeader
data_class = HTMLData
inputter_class = HTMLInputter
max_ndim = 2 # HTML supports writing 2-d columns with shape (n, m)
def __init__(self, htmldict={}):
"""
Initialize classes for HTML reading and writing.
"""
super().__init__()
self.html = deepcopy(htmldict)
if "multicol" not in htmldict:
self.html["multicol"] = True
if "table_id" not in htmldict:
self.html["table_id"] = 1
self.inputter.html = self.html
def read(self, table):
"""
Read the ``table`` in HTML format and return a resulting ``Table``.
"""
self.outputter = HTMLOutputter()
return super().read(table)
def write(self, table):
"""
Return data in ``table`` converted to HTML as a list of strings.
"""
# Check that table has only 1-d or 2-d columns. Above that fails.
self._check_multidim_table(table)
cols = list(table.columns.values())
self.data.header.cols = cols
self.data.cols = cols
if isinstance(self.data.fill_values, tuple):
self.data.fill_values = [self.data.fill_values]
self.data._set_fill_values(cols)
self.data._set_col_formats()
lines = []
# Set HTML escaping to False for any column in the raw_html_cols input
raw_html_cols = self.html.get("raw_html_cols", [])
if isinstance(raw_html_cols, str):
raw_html_cols = [raw_html_cols] # Allow for a single string as input
cols_escaped = [col.info.name not in raw_html_cols for col in cols]
# Kwargs that get passed on to bleach.clean() if that is available.
raw_html_clean_kwargs = self.html.get("raw_html_clean_kwargs", {})
# Use XMLWriter to output HTML to lines
w = writer.XMLWriter(ListWriter(lines))
with w.tag("html"):
with w.tag("head"):
# Declare encoding and set CSS style for table
with w.tag("meta", attrib={"charset": "utf-8"}):
pass
with w.tag(
"meta",
attrib={
"http-equiv": "Content-type",
"content": "text/html;charset=UTF-8",
},
):
pass
if "css" in self.html:
with w.tag("style"):
w.data(self.html["css"])
if "cssfiles" in self.html:
for filename in self.html["cssfiles"]:
with w.tag(
"link", rel="stylesheet", href=filename, type="text/css"
):
pass
if "jsfiles" in self.html:
for filename in self.html["jsfiles"]:
with w.tag("script", src=filename):
# need this instead of pass to get <script></script>
w.data("")
with w.tag("body"):
if "js" in self.html:
with w.xml_cleaning_method("none"):
with w.tag("script"):
w.data(self.html["js"])
if isinstance(self.html["table_id"], str):
html_table_id = self.html["table_id"]
else:
html_table_id = None
if "table_class" in self.html:
html_table_class = self.html["table_class"]
attrib = {"class": html_table_class}
else:
attrib = {}
with w.tag("table", id=html_table_id, attrib=attrib):
with w.tag("thead"):
with w.tag("tr"):
for col in cols:
if len(col.shape) > 1 and self.html["multicol"]:
# Set colspan attribute for multicolumns
w.start("th", colspan=col.shape[1])
else:
w.start("th")
w.data(col.info.name.strip())
w.end(indent=False)
col_str_iters = []
new_cols_escaped = []
# Make a container to hold any new_col objects created
# below for multicolumn elements. This is purely to
# maintain a reference for these objects during
# subsequent iteration to format column values. This
# requires that the weakref info._parent be maintained.
new_cols = []
for col, col_escaped in zip(cols, cols_escaped):
if len(col.shape) > 1 and self.html["multicol"]:
span = col.shape[1]
for i in range(span):
# Split up multicolumns into separate columns
new_col = Column([el[i] for el in col])
new_col_iter_str_vals = self.fill_values(
col, new_col.info.iter_str_vals()
)
col_str_iters.append(new_col_iter_str_vals)
new_cols_escaped.append(col_escaped)
new_cols.append(new_col)
else:
col_iter_str_vals = self.fill_values(
col, col.info.iter_str_vals()
)
col_str_iters.append(col_iter_str_vals)
new_cols_escaped.append(col_escaped)
for row in zip(*col_str_iters):
with w.tag("tr"):
for el, col_escaped in zip(row, new_cols_escaped):
# Potentially disable HTML escaping for column
method = "escape_xml" if col_escaped else "bleach_clean"
with w.xml_cleaning_method(
method, **raw_html_clean_kwargs
):
w.start("td")
w.data(el.strip())
w.end(indent=False)
# Fixes XMLWriter's insertion of unwanted line breaks
return ["".join(lines)]
def fill_values(self, col, col_str_iters):
"""
Return an iterator of the values with replacements based on fill_values
"""
# check if the col is a masked column and has fill values
is_masked_column = hasattr(col, "mask")
has_fill_values = hasattr(col, "fill_values")
for idx, col_str in enumerate(col_str_iters):
if is_masked_column and has_fill_values:
if col.mask[idx]:
yield col.fill_values[core.masked]
continue
if has_fill_values:
if col_str in col.fill_values:
yield col.fill_values[col_str]
continue
yield col_str
|
4424d701fe2d493795ee01655f33a076cda21b17f8baaca06ea6d96a77c3ecca | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" sextractor.py:
Classes to read SExtractor table format
Built on daophot.py:
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
class SExtractorHeader(core.BaseHeader):
"""Read the header from a file produced by SExtractor."""
comment = r"^\s*#\s*\S\D.*" # Find lines that don't have "# digit"
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a SExtractor
header. The SExtractor header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
"""
# This assumes that the columns are listed in order, one per line with a
# header comment string of the format: "# 1 ID short description [unit]"
# However, some may be missing and must be inferred from skipped column numbers
columns = {}
# E.g. '# 1 ID identification number' (no units) or '# 2 MAGERR magnitude of error [mag]'
# Updated along with issue #4603, for more robust parsing of unit
re_name_def = re.compile(
r"""^\s* \# \s* # possible whitespace around #
(?P<colnumber> [0-9]+)\s+ # number of the column in table
(?P<colname> [-\w]+) # name of the column
# column description, match any character until...
(?:\s+(?P<coldescr> \w .+)
# ...until [non-space][space][unit] or [not-right-bracket][end]
(?:(?<!(\]))$|(?=(?:(?<=\S)\s+\[.+\]))))?
(?:\s*\[(?P<colunit>.+)\])?.* # match units in brackets
""",
re.VERBOSE,
)
dataline = None
for line in lines:
if not line.startswith("#"):
dataline = line # save for later to infer the actual number of columns
break # End of header lines
else:
match = re_name_def.search(line)
if match:
colnumber = int(match.group("colnumber"))
colname = match.group("colname")
coldescr = match.group("coldescr")
# If no units are given, colunit = None
colunit = match.group("colunit")
columns[colnumber] = (colname, coldescr, colunit)
# Handle skipped column numbers
colnumbers = sorted(columns)
        # Handle the case where the last column is array-like by appending a pseudo-column
# If there are more data columns than the largest column number
# then add a pseudo-column that will be dropped later. This allows
# the array column logic below to work in all cases.
if dataline is not None:
n_data_cols = len(dataline.split())
else:
# handles no data, where we have to rely on the last column number
n_data_cols = colnumbers[-1]
        # SExtractor column numbers start at 1.
columns[n_data_cols + 1] = (None, None, None)
colnumbers.append(n_data_cols + 1)
if len(columns) > 1:
            # only fill in skipped columns when there is a genuine column initially
previous_column = 0
for n in colnumbers:
if n != previous_column + 1:
for c in range(previous_column + 1, n):
column_name = (
columns[previous_column][0] + f"_{c - previous_column}"
)
column_descr = columns[previous_column][1]
column_unit = columns[previous_column][2]
columns[c] = (column_name, column_descr, column_unit)
previous_column = n
# Add the columns in order to self.names
colnumbers = sorted(columns)[:-1] # drop the pseudo column
self.names = []
for n in colnumbers:
self.names.append(columns[n][0])
if not self.names:
raise core.InconsistentTableError(
"No column names found in SExtractor header"
)
self.cols = []
for n in colnumbers:
col = core.Column(name=columns[n][0])
col.description = columns[n][1]
col.unit = columns[n][2]
self.cols.append(col)
class SExtractorData(core.BaseData):
start_line = 0
delimiter = " "
comment = r"\s*#"
class SExtractor(core.BaseReader):
"""SExtractor format table.
SExtractor is a package for faint-galaxy photometry (Bertin & Arnouts
1996, A&A Supp. 317, 393.)
See: https://sextractor.readthedocs.io/en/latest/
Example::
# 1 NUMBER
# 2 ALPHA_J2000
# 3 DELTA_J2000
# 4 FLUX_RADIUS
# 7 MAG_AUTO [mag]
# 8 X2_IMAGE Variance along x [pixel**2]
# 9 X_MAMA Barycenter position along MAMA x axis [m**(-6)]
# 10 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)]
1 32.23222 10.1211 0.8 1.2 1.4 18.1 1000.0 0.00304 -3.498
2 38.12321 -88.1321 2.2 2.4 3.1 17.0 1500.0 0.00908 1.401
Note the skipped numbers since flux_radius has 3 columns. The three
FLUX_RADIUS columns will be named FLUX_RADIUS, FLUX_RADIUS_1, FLUX_RADIUS_2
Also note that a post-ID description (e.g. "Variance along x") is optional
and that units may be specified at the end of a line in brackets.
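    A minimal read sketch (the catalog file name is illustrative)::
        from astropy.io import ascii
        dat = ascii.read('catalog.cat', format='sextractor')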
"""
_format_name = "sextractor"
_io_registry_can_write = False
_description = "SExtractor format table"
header_class = SExtractorHeader
data_class = SExtractorData
inputter_class = core.ContinuationLinesInputter
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super().read(table)
# remove the comments
if "comments" in out.meta:
del out.meta["comments"]
return out
def write(self, table):
raise NotImplementedError
|
4cfd9432fb3758d8aee48d71855116e5056207f97b67d6b44d30ea9e993525a4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing QDP tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import copy
import re
import warnings
from collections.abc import Iterable
import numpy as np
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core
def _line_type(line, delimiter=None):
"""Interpret a QDP file line
Parameters
----------
line : str
        a single line of the file
    delimiter : str, optional
        Field delimiter. Any whitespace is accepted when `None` (default).
Returns
-------
type : str
Line type: "comment", "command", or "data"
Examples
--------
>>> _line_type("READ SERR 3")
'command'
>>> _line_type(" \\n !some gibberish")
'comment'
>>> _line_type(" ")
'comment'
>>> _line_type(" 21345.45")
'data,1'
>>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan")
'data,6'
>>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',')
'data,6'
>>> _line_type(" 21345.45 ! a comment to disturb")
'data,1'
>>> _line_type("NO NO NO NO NO")
'new'
>>> _line_type("NO,NO,NO,NO,NO", delimiter=',')
'new'
>>> _line_type("N O N NOON OON O")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
>>> _line_type(" some non-comment gibberish")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
"""
_decimal_re = r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?"
_command_re = r"READ [TS]ERR(\s+[0-9]+)+"
sep = delimiter
if delimiter is None:
sep = r"\s+"
_new_re = rf"NO({sep}NO)+"
_data_re = rf"({_decimal_re}|NO|[-+]?nan)({sep}({_decimal_re}|NO|[-+]?nan))*)"
_type_re = rf"^\s*((?P<command>{_command_re})|(?P<new>{_new_re})|(?P<data>{_data_re})?\s*(\!(?P<comment>.*))?\s*$"
_line_type_re = re.compile(_type_re)
line = line.strip()
if not line:
return "comment"
match = _line_type_re.match(line)
if match is None:
raise ValueError(f"Unrecognized QDP line: {line}")
for type_, val in match.groupdict().items():
if val is None:
continue
if type_ == "data":
return f"data,{len(val.split(sep=delimiter))}"
else:
return type_
def _get_type_from_list_of_lines(lines, delimiter=None):
"""Read through the list of QDP file lines and label each line by type
Parameters
----------
lines : list
List containing one file line in each entry
Returns
-------
contents : list
        List containing the type for each line (see `_line_type`)
ncol : int
The number of columns in the data lines. Must be the same throughout
the file
Examples
--------
>>> line0 = "! A comment"
>>> line1 = "543 12 456.0"
>>> lines = [line0, line1]
>>> types, ncol = _get_type_from_list_of_lines(lines)
>>> types[0]
'comment'
>>> types[1]
'data,3'
>>> ncol
3
>>> lines.append("23")
>>> _get_type_from_list_of_lines(lines)
Traceback (most recent call last):
...
ValueError: Inconsistent number of columns
"""
types = [_line_type(line, delimiter=delimiter) for line in lines]
current_ncol = None
for type_ in types:
if type_.startswith("data,"):
ncol = int(type_[5:])
if current_ncol is None:
current_ncol = ncol
elif ncol != current_ncol:
raise ValueError("Inconsistent number of columns")
return types, current_ncol
def _get_lines_from_file(qdp_file):
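    """Return the content of ``qdp_file`` as a list of lines.
    ``qdp_file`` may be a newline-separated string, the name of a file to
    open, or any other iterable of lines.
    """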
if "\n" in qdp_file:
lines = qdp_file.split("\n")
elif isinstance(qdp_file, str):
with open(qdp_file) as fobj:
lines = [line.strip() for line in fobj.readlines()]
elif isinstance(qdp_file, Iterable):
lines = qdp_file
else:
raise ValueError("invalid value of qdb_file")
return lines
def _interpret_err_lines(err_specs, ncols, names=None):
"""Give list of column names from the READ SERR and TERR commands
Parameters
----------
err_specs : dict
``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}``
Error specifications for symmetric and two-sided errors
ncols : int
Number of data columns
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
Returns
-------
colnames : list
List containing the column names. Error columns will have the name
of the main column plus ``_err`` for symmetric errors, and ``_perr``
and ``_nerr`` for positive and negative errors respectively
Examples
--------
>>> col_in = ['MJD', 'Rate']
>>> cols = _interpret_err_lines(None, 2, names=col_in)
>>> cols[0]
'MJD'
>>> err_specs = {'terr': [1], 'serr': [2]}
>>> ncols = 5
>>> cols = _interpret_err_lines(err_specs, ncols, names=col_in)
>>> cols[0]
'MJD'
>>> cols[2]
'MJD_nerr'
>>> cols[4]
'Rate_err'
>>> _interpret_err_lines(err_specs, 6, names=col_in)
Traceback (most recent call last):
...
ValueError: Inconsistent number of input colnames
"""
colnames = ["" for i in range(ncols)]
if err_specs is None:
serr_cols = terr_cols = []
else:
# I don't want to empty the original one when using `pop` below
err_specs = copy.deepcopy(err_specs)
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if names is not None:
all_error_cols = len(serr_cols) + len(terr_cols) * 2
if all_error_cols + len(names) != ncols:
raise ValueError("Inconsistent number of input colnames")
shift = 0
for i in range(ncols):
col_num = i + 1 - shift
if colnames[i] != "":
continue
colname_root = f"col{col_num}"
if names is not None:
colname_root = names[col_num - 1]
colnames[i] = f"{colname_root}"
if col_num in serr_cols:
colnames[i + 1] = f"{colname_root}_err"
shift += 1
continue
if col_num in terr_cols:
colnames[i + 1] = f"{colname_root}_perr"
colnames[i + 2] = f"{colname_root}_nerr"
shift += 2
continue
assert not np.any([c == "" for c in colnames])
return colnames
def _get_tables_from_qdp_file(qdp_file, input_colnames=None, delimiter=None):
"""Get all tables from a QDP file
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
input_colnames : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
delimiter : str
Delimiter for the values in the table.
Returns
-------
list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
lines = _get_lines_from_file(qdp_file)
contents, ncol = _get_type_from_list_of_lines(lines, delimiter=delimiter)
table_list = []
err_specs = {}
colnames = None
comment_text = ""
initial_comments = ""
command_lines = ""
current_rows = None
for line, datatype in zip(lines, contents):
line = line.strip().lstrip("!")
# Is this a comment?
if datatype == "comment":
comment_text += line + "\n"
continue
if datatype == "command":
            # The first time I find commands, I save whatever comments into
            # the initial comments.
if command_lines == "":
initial_comments = comment_text
comment_text = ""
if err_specs != {}:
warnings.warn(
"This file contains multiple command blocks. Please verify",
AstropyUserWarning,
)
command_lines += line + "\n"
continue
if datatype.startswith("data"):
# The first time I find data, I define err_specs
if err_specs == {} and command_lines != "":
for cline in command_lines.strip().split("\n"):
command = cline.strip().split()
# This should never happen, but just in case.
if len(command) < 3:
continue
err_specs[command[1].lower()] = [int(c) for c in command[2:]]
if colnames is None:
colnames = _interpret_err_lines(err_specs, ncol, names=input_colnames)
if current_rows is None:
current_rows = []
values = []
for v in line.split(delimiter):
if v == "NO":
values.append(np.ma.masked)
else:
# Understand if number is int or float
try:
values.append(int(v))
except ValueError:
values.append(float(v))
current_rows.append(values)
continue
if datatype == "new":
# Save table to table_list and reset
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split(
"\n"
)
new_table.meta["comments"] = comment_text.strip().split("\n")
# Reset comments
comment_text = ""
table_list.append(new_table)
current_rows = None
continue
# At the very end, if there is still a table being written, let's save
# it to the table_list
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split("\n")
new_table.meta["comments"] = comment_text.strip().split("\n")
table_list.append(new_table)
return table_list
def _understand_err_col(colnames):
"""Get which column names are error columns
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith("_nerr"):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith("_perr"):
raise ValueError("Missing positive error")
return serr, terr
def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
"""Read a table from a QDP file
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
table_id : int, default 0
Number of the table to be read from the QDP file. This is useful
        when multiple tables are present in the file. By default, the first is read.
delimiter : str
Any delimiter accepted by the `sep` argument of str.split()
Returns
-------
    table : `~astropy.table.Table`
        The single table indicated by ``table_id`` among those present in the QDP file
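    Examples
    --------
    A sketch; the file name and column names are illustrative::
        t = _read_table_qdp('example.qdp', table_id=0, names=['a', 'b'])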
"""
if table_id is None:
warnings.warn(
"table_id not specified. Reading the first available table",
AstropyUserWarning,
)
table_id = 0
tables = _get_tables_from_qdp_file(
qdp_file, input_colnames=names, delimiter=delimiter
)
return tables[table_id]
def _write_table_qdp(table, filename=None, err_specs=None):
"""Write a table to a QDP file
Parameters
----------
table : :class:`~astropy.table.Table`
Input table to be written
filename : str
Output QDP file name
Other Parameters
----------------
err_specs : dict
Dictionary of the format {'serr': [1], 'terr': [2, 3]}, specifying
which columns have symmetric and two-sided errors (see QDP format
specification)
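    Examples
    --------
    A minimal sketch; the ``_err`` suffix follows the column-name convention
    understood by `_understand_err_col`::
        from astropy.table import Table
        t = Table({'a': [1.0, 2.0], 'a_err': [0.1, 0.2]})
        lines = _write_table_qdp(t)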
"""
import io
fobj = io.StringIO()
if "initial_comments" in table.meta and table.meta["initial_comments"] != []:
for line in table.meta["initial_comments"]:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
if err_specs is None:
serr_cols, terr_cols = _understand_err_col(table.colnames)
else:
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if serr_cols != []:
col_string = " ".join([str(val) for val in serr_cols])
print(f"READ SERR {col_string}", file=fobj)
if terr_cols != []:
col_string = " ".join([str(val) for val in terr_cols])
print(f"READ TERR {col_string}", file=fobj)
if "comments" in table.meta and table.meta["comments"] != []:
for line in table.meta["comments"]:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
colnames = table.colnames
print("!" + " ".join(colnames), file=fobj)
for row in table:
values = []
for val in row:
if not np.ma.is_masked(val):
rep = str(val)
else:
rep = "NO"
values.append(rep)
print(" ".join(values), file=fobj)
full_string = fobj.getvalue()
fobj.close()
if filename is not None:
with open(filename, "w") as fobj:
print(full_string, file=fobj)
return full_string.split("\n")
class QDPSplitter(core.DefaultSplitter):
"""
Split on space for QDP tables
"""
delimiter = " "
class QDPHeader(basic.CommentedHeaderHeader):
"""
    Header that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`
"""
splitter_class = QDPSplitter
comment = "!"
write_comment = "!"
class QDPData(basic.BasicData):
"""
    Data that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`
"""
splitter_class = QDPSplitter
fill_values = [(core.masked, "NO")]
comment = "!"
write_comment = None
class QDP(basic.Basic):
"""Quick and Dandy Plot table.
Example::
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b be c d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b be c d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
The input table above contains some initial comments, the error commands,
then two tables.
This file format can contain multiple tables, separated by a line full
    of ``NO``s. Comment lines start with an exclamation mark, and missing values are single
``NO`` entries. The delimiter is usually whitespace, more rarely a comma.
The QDP format differentiates between data and error columns. The table
above has commands::
READ TERR 1
READ SERR 3
which mean that after data column 1 there will be two error columns
    containing its positive and negative error bars, then data column 2 without
error bars, then column 3, then a column with the symmetric error of column
3, then the remaining data columns.
As explained below, table headers are highly inconsistent. Possible
comments containing column names will be ignored and columns will be called
``col1``, ``col2``, etc. unless the user specifies their names with the
    ``names=`` keyword argument.
When passing column names, pass **only the names of the data columns, not
the error columns.**
Error information will be encoded in the names of the table columns.
(e.g. ``a_perr`` and ``a_nerr`` for the positive and negative error of
column ``a``, ``b_err`` the symmetric error of column ``b``.)
When writing tables to this format, users can pass an ``err_specs`` keyword
passing a dictionary ``{'serr': [3], 'terr': [1, 2]}``, meaning that data
    columns 1 and 2 will have two additional columns each with their positive
and negative errors, and data column 3 will have an additional column with
a symmetric error (just like the ``READ SERR`` and ``READ TERR`` commands
above)
Headers are just comments, and tables distributed by various missions
can differ greatly in their use of conventions. For example, light curves
distributed by the Swift-Gehrels mission have an extra space in one header
entry that makes the number of labels inconsistent with the number of cols.
For this reason, we ignore the comments that might encode the column names
and leave the name specification to the user.
Example::
> Extra space
> |
> v
>! MJD Err (pos) Err(neg) Rate Error
>53000.123456 2.378e-05 -2.378472e-05 NO 0.212439
    These reader and writer classes will strive to understand which of the
comments belong to all the tables, and which ones to each single table.
General comments will be stored in the ``initial_comments`` meta of each
table. The comments of each table will be stored in the ``comments`` meta.
Example::
t = Table.read(example_qdp, format='ascii.qdp', table_id=1, names=['a', 'b', 'c', 'd'])
reads the second table (``table_id=1``) in file ``example.qdp`` containing
the table above. There are four column names but seven data columns, why?
Because the ``READ SERR`` and ``READ TERR`` commands say that there are
three error columns.
``t.meta['initial_comments']`` will contain the initial two comment lines
in the file, while ``t.meta['comments']`` will contain ``Table 1 comment``
The table can be written to another file, preserving the same information,
as::
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
Note how the ``terr`` and ``serr`` commands are passed to the writer.
"""
_format_name = "qdp"
_io_registry_can_write = True
_io_registry_suffix = ".qdp"
_description = "Quick and Dandy Plotter"
header_class = QDPHeader
data_class = QDPData
def __init__(self, table_id=None, names=None, err_specs=None, sep=None):
super().__init__()
self.table_id = table_id
self.names = names
self.err_specs = err_specs
self.delimiter = sep
def read(self, table):
self.lines = self.inputter.get_lines(table, newline="\n")
return _read_table_qdp(
self.lines,
table_id=self.table_id,
names=self.names,
delimiter=self.delimiter,
)
def write(self, table):
self._check_multidim_table(table)
lines = _write_table_qdp(table, err_specs=self.err_specs)
return lines
|
b826a3586ac3280a64cf12627d1d1ac8a0797fc1bf6ad9fa40805ccabfa6293d | READ_DOCSTRING = """
Read the input ``table`` and return the table. Most of
the default behavior for various parameters is determined by the Reader
class.
See also:
- https://docs.astropy.org/en/stable/io/ascii/
- https://docs.astropy.org/en/stable/io/ascii/read.html
Parameters
----------
table : str, file-like, list, `pathlib.Path` object
Input table as a file name, file-like object, list of string[s],
single newline-separated string or `pathlib.Path` object.
guess : bool
Try to guess the table format. Defaults to None.
format : str, `~astropy.io.ascii.BaseReader`
Input table format
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dictionary of converters to specify output column dtypes. Each key in
the dictionary is a column name or else a name matching pattern
including wildcards. The value is either a data type such as ``int`` or
``np.float32``; a list of such types which is tried in order until a
successful conversion is achieved; or a list of converter tuples (see
the `~astropy.io.ascii.convert_numpy` function for details).
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fill_values : tuple, list of tuple
specification of fill values for bad or missing table values
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``)
fast_reader : bool, str or dict
Whether to use the C engine, can also be a dict with options which
defaults to `False`; parameters for options dict:
use_fast_converter: bool
enable faster but slightly imprecise floating point conversion method
parallel: bool or int
multiprocessing conversion using ``cpu_count()`` or ``'number'`` processes
exponent_style: str
One-character string defining the exponent or ``'Fortran'`` to auto-detect
Fortran-style scientific notation like ``'3.14159D+00'`` (``'E'``, ``'D'``, ``'Q'``),
            all case-insensitive; default ``'E'``, all others imply ``use_fast_converter``
chunk_size : int
If supplied with a value > 0 then read the table in chunks of
approximately ``chunk_size`` bytes. Default is reading table in one pass.
chunk_generator : bool
If True and ``chunk_size > 0`` then return an iterator that returns a
table for each chunk. The default is to return a single stacked table
for all the chunks.
encoding : str
        Encoding used to read the file (default ``None``).
Returns
-------
dat : `~astropy.table.Table` or <generator>
Output table
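Examples
--------
A minimal sketch (the file name is illustrative)::
    from astropy.io import ascii
    dat = ascii.read('table.dat', format='basic', delimiter='|')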
"""
# Specify allowed types for core read() keyword arguments. Each entry
# corresponds to the name of an argument and either a type (e.g. int) or a
# list of types. These get used in io.ascii.ui._validate_read_write_kwargs().
# - The commented-out kwargs are too flexible for a useful check
# - 'list-like' is a special case for an iterable that is not a string.
READ_KWARG_TYPES = {
# 'table'
"guess": bool,
# 'format'
# 'Reader'
# 'Inputter'
# 'Outputter'
"delimiter": str,
"comment": str,
"quotechar": str,
"header_start": int,
"data_start": (int, str), # CDS allows 'guess'
"data_end": int,
"converters": dict,
# 'data_Splitter'
# 'header_Splitter'
"names": "list-like",
"include_names": "list-like",
"exclude_names": "list-like",
"fill_values": "list-like",
"fill_include_names": "list-like",
"fill_exclude_names": "list-like",
"fast_reader": (bool, str, dict),
"encoding": str,
}
WRITE_DOCSTRING = """
Write the input ``table`` to ``filename``. Most of the default behavior
for various parameters is determined by the Writer class.
See also:
- https://docs.astropy.org/en/stable/io/ascii/
- https://docs.astropy.org/en/stable/io/ascii/write.html
Parameters
----------
table : `~astropy.io.ascii.BaseReader`, array-like, str, file-like, list
Input table as a Reader object, Numpy struct array, file name,
file-like object, list of strings, or single newline-separated string.
output : str, file-like
    Output [filename, file-like object]. Defaults to ``sys.stdout``.
format : str
Output table format. Defaults to 'basic'.
delimiter : str
Column delimiter string
comment : str, bool
String defining a comment line in table. If `False` then comments
are not written out.
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool, str
Whether to use the fast Cython writer. Can be `True` (use fast writer
if available), `False` (do not use fast writer), or ``'force'`` (use
fast writer and fail if not available, mostly for testing).
overwrite : bool
If ``overwrite=False`` (default) and the file exists, then an OSError
is raised. This parameter is ignored when the ``output`` arg is not a
string (e.g., a file object).
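Examples
--------
A minimal sketch (``tbl`` stands for any `~astropy.table.Table`; the file
name is illustrative)::
    from astropy.io import ascii
    ascii.write(tbl, 'out.csv', format='csv', overwrite=True)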
"""
# Specify allowed types for core write() keyword arguments. Each entry
# corresponds to the name of an argument and either a type (e.g. int) or a
# list of types. These get used in io.ascii.ui._validate_read_write_kwargs().
# - The commented-out kwargs are too flexible for a useful check
# - 'list-like' is a special case for an iterable that is not a string.
WRITE_KWARG_TYPES = {
# 'table'
# 'output'
"format": str,
"delimiter": str,
"comment": (str, bool),
"quotechar": str,
"header_start": int,
"formats": dict,
"strip_whitespace": (bool),
"names": "list-like",
"include_names": "list-like",
"exclude_names": "list-like",
"fast_writer": (bool, str),
"overwrite": (bool),
}
|
e2e111439c0486e842708fea75d1a05bc126577cfad4cdce1c547b53adfb7d6e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import collections
import contextlib
import copy
import os
import re
import sys
import time
import warnings
from io import StringIO
import numpy as np
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import (
basic,
cds,
core,
cparser,
daophot,
ecsv,
fastbasic,
fixedwidth,
html,
ipac,
latex,
mrt,
rst,
sextractor,
)
from .docs import READ_KWARG_TYPES, WRITE_KWARG_TYPES
_read_trace = []
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
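    Illustrative calls (the inputs are made up for this sketch)::
        >>> _probably_html('<table><tr><td>1</td></tr></table>')
        True
        >>> _probably_html('col1 col2')
        False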
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
table = table[: i + 1]
break
table = os.linesep.join(table)
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(
r"( http[s]? | ftp | file ) :// .+ \.htm[l]?$",
table,
re.IGNORECASE | re.VERBOSE,
):
return True
# Filename ending in .htm or .html which exists
if re.search(r"\.htm[l]?$", table[-5:], re.IGNORECASE) and os.path.exists(
os.path.expanduser(table)
):
return True
# Table starts with HTML document type declaration
if re.match(r"\s* <! \s* DOCTYPE \s* HTML", table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(
re.search(rf"< \s* {element} [^>]* >", table, re.IGNORECASE | re.VERBOSE)
for element in ("table", "tr", "td")
):
return True
return False
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read()
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
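    Examples
    --------
    A minimal sketch::
        from astropy.io import ascii
        ascii.set_guess(False)  # disable format guessing by default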
"""
global _GUESS
_GUESS = guess
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dict of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : tuple, list of tuple
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
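    Examples
    --------
    A minimal sketch using the default ``Basic`` reader::
        reader = get_reader(delimiter='|')
        dat = reader.read(['col1|col2', '1|2'])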
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader["enable"] == "force":
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError(f"Cannot supply both format and {label} keywords")
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError(
"ASCII format {!r} not in allowed list {}".format(
format, sorted(core.FORMAT_CLASSES)
)
)
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
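    For example (illustrative calls)::
        >>> _get_fast_reader_dict({'fast_reader': True})
        {'enable': True}
        >>> _get_fast_reader_dict({'fast_reader': {'use_fast_converter': True}})
        {'use_fast_converter': True, 'enable': 'force'}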
"""
fast_reader = copy.deepcopy(kwargs.get("fast_reader", True))
if isinstance(fast_reader, dict):
fast_reader.setdefault("enable", "force")
else:
fast_reader = {"enable": fast_reader}
return fast_reader
def _validate_read_write_kwargs(read_write, **kwargs):
"""Validate types of keyword arg inputs to read() or write()."""
def is_ducktype(val, cls):
"""Check if ``val`` is an instance of ``cls`` or "seems" like one:
        ``cls(val) == val`` does not raise an exception and is `True`. In
this way you can pass in ``np.int16(2)`` and have that count as `int`.
This has a special-case of ``cls`` being 'list-like', meaning it is
an iterable but not a string.
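        Illustrative checks (a sketch)::
            >>> is_ducktype(np.int16(2), int)
            True
            >>> is_ducktype('abc', 'list-like')
            False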
"""
if cls == "list-like":
ok = not isinstance(val, str) and isinstance(val, collections.abc.Iterable)
else:
ok = isinstance(val, cls)
if not ok:
            # See if ``val`` walks and quacks like a ``cls``.
try:
new_val = cls(val)
assert new_val == val
except Exception:
ok = False
else:
ok = True
return ok
kwarg_types = READ_KWARG_TYPES if read_write == "read" else WRITE_KWARG_TYPES
for arg, val in kwargs.items():
# Kwarg type checking is opt-in, so kwargs not in the list are considered OK.
# This reflects that some readers allow additional arguments that may not
        # be well-specified, e.g. ``__init__(self, **kwargs)`` is an option.
if arg not in kwarg_types or val is None:
continue
# Single type or tuple of types for this arg (like isinstance())
types = kwarg_types[arg]
err_msg = (
f"{read_write}() argument '{arg}' must be a "
f"{types} object, got {type(val)} instead"
)
# Force `types` to be a tuple for the any() check below
if not isinstance(types, tuple):
types = (types,)
if not any(is_ducktype(val, cls) for cls in types):
raise TypeError(err_msg)
def _expand_user_if_path(argument):
if isinstance(argument, (str, bytes, os.PathLike)):
# For the `read()` method, a `str` input can be either a file path or
# the table data itself. File names for io.ascii cannot have newlines
# in them and io.ascii does not accept table data as `bytes`, so we can
# attempt to detect data strings like this.
is_str_data = isinstance(argument, str) and (
"\n" in argument or "\r" in argument
)
if not is_str_data:
# Remain conservative in expanding the presumed-path
ex_user = os.path.expanduser(argument)
if os.path.exists(ex_user):
argument = ex_user
return argument
def read(table, guess=None, **kwargs):
    # This is the final output from reading. Static analysis indicates the reading
# logic (which is indeed complex) might not define `dat`, thus do so here.
dat = None
# Docstring defined below
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
_validate_read_write_kwargs("read", **kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs["fast_reader"] = fast_reader
if fast_reader["enable"] and fast_reader.get("chunk_size"):
return _read_in_chunks(table, **kwargs)
if "fill_values" not in kwargs:
kwargs["fill_values"] = [("", "0")]
# If an Outputter is supplied in kwargs that will take precedence.
if (
"Outputter" in kwargs
): # user specified Outputter, not supported for fast reading
fast_reader["enable"] = False
format = kwargs.get("format")
# Dictionary arguments are passed by reference per default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs["fast_reader"] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get("Reader"), "Reader")
if Reader is not None:
new_kwargs["Reader"] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if "format" in new_kwargs:
del new_kwargs["format"]
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs["guess_html"] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. If a `readme` arg was passed that implies CDS format, in
# which case the original `table` as the data filename must be left
# intact.
if "readme" not in new_kwargs:
encoding = kwargs.get("encoding")
try:
table = _expand_user_if_path(table)
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r"[\r\n]", table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs["guess_html"]:
new_kwargs["guess_html"] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
table = _expand_user_if_path(table)
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader["enable"] and f"fast_{format}" in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs["Reader"] = core.FAST_CLASSES[f"fast_{format}"]
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(fast_kwargs),
"Reader": fast_reader_rdr.__class__,
"status": "Success with fast reader (no guessing)",
}
)
except (
core.ParameterError,
cparser.CParserError,
UnicodeEncodeError,
) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader["enable"] == "force":
raise core.InconsistentTableError(
f"fast reader {fast_reader_rdr.__class__} exception: {err}"
)
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(new_kwargs),
"Reader": reader.__class__,
"status": (
"Success with slow reader after failing"
" with fast (no guessing)"
),
}
)
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(new_kwargs),
"Reader": reader.__class__,
"status": "Success with specified Reader class (no guessing)",
}
)
# Static analysis (pyright) indicates `dat` might be left undefined, so just
# to be sure define it at the beginning and check here.
if dat is None:
raise RuntimeError(
"read() function failed due to code logic error, "
"please report this bug on github"
)
return dat
read.__doc__ = core.READ_DOCSTRING
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
"""
# Keep a trace of all failed guesses kwarg
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (
fast_reader["enable"]
and format is not None
and f"fast_{format}" in core.FAST_CLASSES
):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs["Reader"] = core.FAST_CLASSES[f"fast_{format}"]
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get("fast_reader")
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (
fast_reader["enable"] is False
and guess_kwargs["Reader"] in core.FAST_CLASSES.values()
):
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": guess_kwargs["Reader"].__class__,
"status": "Disabled: reader only available in fast version",
"dt": f"{0.0:.3f} ms",
}
)
continue
# If user required a fast reader then skip all non-fast readers
if (
fast_reader["enable"] == "force"
and guess_kwargs["Reader"] not in core.FAST_CLASSES.values()
):
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": guess_kwargs["Reader"].__class__,
"status": "Disabled: no fast version of reader available",
"dt": f"{0.0:.3f} ms",
}
)
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
            # Do guess_kwargs.update(read_kwargs) except that if guess_kwargs has
# a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (
core.InconsistentTableError,
ValueError,
TypeError,
AttributeError,
core.OptionalTableImportError,
core.ParameterError,
cparser.CParserError,
)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
            # If guessing will try all Readers then use strict requirements on column names
if "Reader" not in read_kwargs:
guess_kwargs["strict_names"] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": reader.__class__,
"status": "Success (guessing)",
"dt": f"{(time.time() - t0) * 1000:.3f} ms",
}
)
return dat
except guess_exception_classes as err:
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"status": f"{err.__class__.__name__}: {str(err)}",
"dt": f"{(time.time() - t0) * 1000:.3f} ms",
}
)
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(read_kwargs),
"Reader": reader.__class__,
"status": (
"Success with original kwargs without strict_names (guessing)"
),
}
)
return dat
except guess_exception_classes as err:
_read_trace.append(
{
"kwargs": copy.deepcopy(read_kwargs),
"status": f"{err.__class__.__name__}: {str(err)}",
}
)
failed_kwargs.append(read_kwargs)
lines = [
"\nERROR: Unable to guess table format with the guesses listed below:"
]
for kwargs in failed_kwargs:
sorted_keys = sorted(
x for x in sorted(kwargs) if x not in ("Reader", "Outputter")
)
reader_repr = repr(kwargs.get("Reader", basic.Basic))
keys_vals = ["Reader:" + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend([f"{key}: {val!r}" for key, val in kwargs_sorted])
lines.append(" ".join(keys_vals))
msg = [
"",
"************************************************************************",
"** ERROR: Unable to guess table format with the guesses listed above. **",
"** **",
"** To figure out why the table did not read, use guess=False and **",
"** fast_reader=False, along with any appropriate arguments to read(). **",
"** In particular specify the format and any known attributes like the **",
"** delimiter. **",
"************************************************************************",
]
lines.extend(msg)
raise core.InconsistentTableError("\n".join(lines)) from None
def _get_guess_kwargs_list(read_kwargs):
"""
Get the full list of reader keyword argument dicts that are the basis
for the format guessing process. The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop("guess_html", None):
guess_kwargs_list.append(dict(Reader=html.HTML))
# Start with ECSV because an ECSV file will be read by Basic. This format
# has very specific header requirements and fails out quickly.
guess_kwargs_list.append(dict(Reader=ecsv.Ecsv))
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (
fixedwidth.FixedWidthTwoLine,
rst.RST,
fastbasic.FastBasic,
basic.Basic,
fastbasic.FastRdb,
basic.Rdb,
fastbasic.FastTab,
basic.Tab,
cds.Cds,
mrt.Mrt,
daophot.Daophot,
sextractor.SExtractor,
ipac.Ipac,
latex.Latex,
latex.AASTex,
):
guess_kwargs_list.append(dict(Reader=reader))
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (
fastbasic.FastCommentedHeader,
basic.CommentedHeader,
fastbasic.FastBasic,
basic.Basic,
fastbasic.FastNoHeader,
basic.NoHeader,
):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(
dict(Reader=Reader, delimiter=delimiter, quotechar=quotechar)
)
return guess_kwargs_list
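# For orientation, a hedged sketch of what the returned list looks like when
# ``guess_html`` is unset and no user kwargs are supplied (exact contents
# follow from the reader loops above):
#
#     [{'Reader': ecsv.Ecsv},
#      {'Reader': fixedwidth.FixedWidthTwoLine},
#      {'Reader': rst.RST},
#      ...,
#      {'Reader': fastbasic.FastCommentedHeader, 'delimiter': '|', 'quotechar': '"'},
#      ...]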
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
"""
fast_reader = kwargs["fast_reader"]
chunk_size = fast_reader.pop("chunk_size")
chunk_generator = fast_reader.pop("chunk_generator", False)
fast_reader["parallel"] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ("S", "U")
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta, copy=False)
return out
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
for input file-like object, see #6460"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if isinstance(table, str) and ("\n" in table or "\r" in table):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, "read") and hasattr(table, "seek"):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs["fast_reader"]["return_header_chars"] = True
header = "" # Table header (up to start of data)
prev_chunk_chars = "" # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get("encoding")) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r"\S", chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == "\n":
break
else:
raise ValueError("no newline found in chunk (chunk_size too small?)")
# Stick the header onto the chunk part up to (and including) the
# last newline. Make sure the small strings are concatenated first.
complete_chunk = (header + prev_chunk_chars) + chunk[: idx + 1]
prev_chunk_chars = chunk[idx + 1 :]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop("__ascii_fast_reader_header_chars__")
first_chunk = False
yield tbl
if final_chunk:
break
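# Illustrative usage sketch of chunked reading through the public interface;
# the file name and chunk_size value are arbitrary assumptions for the
# example:
#
#     from astropy.io import ascii
#     tbl = ascii.read('large_table.csv', format='csv',
#                      fast_reader={'chunk_size': 100 * 1000000})
#
# Passing chunk_generator=True in the fast_reader dict returns the generator
# of per-chunk tables from _read_in_chunks_generator instead of a single
# stacked table.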
extra_writer_pars = (
"delimiter",
"comment",
"quotechar",
"formats",
"names",
"include_names",
"exclude_names",
"strip_whitespace",
)
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if "strip_whitespace" not in kwargs:
kwargs["strip_whitespace"] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell the
# user to simply remove the meta['comments'].
if isinstance(
writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader)
) and not isinstance(kwargs.get("comment", ""), str):
raise ValueError(
"for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing."
)
return writer
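# Minimal sketch, assuming direct use of get_writer (writers are normally
# created for you by write() below); the kwargs shown are arbitrary:
#
#     writer = get_writer(delimiter='|', strip_whitespace=False)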
def write(
table,
output=None,
format=None,
Writer=None,
fast_writer=True,
*,
overwrite=False,
**kwargs,
):
# Docstring inserted below
_validate_read_write_kwargs(
"write", format=format, fast_writer=fast_writer, overwrite=overwrite, **kwargs
)
if isinstance(output, (str, bytes, os.PathLike)):
output = os.path.expanduser(output)
if not overwrite and os.path.lexists(output):
raise OSError(NOT_OVERWRITING_MSG.format(output))
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get("names")
if isinstance(table, Table):
# While we are only going to read data from columns, we may need to
# to adjust info attributes such as format, so we make a shallow copy.
table = table.__class__(table, names=names, copy=False)
else:
# Otherwise, create a table from the input.
table = Table(table, names=names, copy=False)
table0 = table[:0].copy()
core._apply_include_exclude_names(
table0,
kwargs.get("names"),
kwargs.get("include_names"),
kwargs.get("exclude_names"),
)
diff_format_with_names = set(kwargs.get("formats", [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
"The key(s) {} specified in the formats argument do not match a column"
" name.".format(diff_format_with_names),
AstropyWarning,
)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, "Writer")
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
if not hasattr(output, "write"):
# NOTE: we need to specify newline='', otherwise the default
# behavior is for Python to translate \r\n (which we write because
# of os.linesep) into \r\r\n. Specifying newline='' disables any
# auto-translation.
output = open(output, "w", newline="")
output.write(outstr)
output.write(os.linesep)
output.close()
else:
output.write(outstr)
output.write(os.linesep)
write.__doc__ = core.WRITE_DOCSTRING
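# Illustrative usage sketch for write(); the table contents and output file
# name are arbitrary assumptions for the example:
#
#     from astropy.table import Table
#     t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
#     write(t, 'values.csv', format='csv', overwrite=True)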
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dict
Ordered list of format guesses and status
"""
return copy.deepcopy(_read_trace)
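# Illustrative usage sketch: read with guessing enabled (the default), then
# inspect which formats were attempted. The inline table is an arbitrary
# assumption for the example:
#
#     from astropy.io import ascii
#     dat = ascii.read('a b\n1 2\n3 4')
#     for entry in ascii.get_read_trace():
#         print(entry['status'])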
|
c5f60c1c49a39a8f6c1b42b31393677045270afa960802b24a25d4740938e83f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import re
from collections import OrderedDict
from astropy.table import Table
from astropy.utils.misc import _set_locale
from . import core, cparser
class FastBasic(metaclass=core.MetaBaseReader):
"""
This class is intended to handle the same format addressed by the
ordinary :class:`Basic` writer, but it acts as a wrapper for underlying C
code and is therefore much faster. Unlike the other ASCII readers and
writers, this class is not very extensible and is restricted
by optimization requirements.
"""
_format_name = "fast_basic"
_description = "Basic table with custom delimiter using the fast C engine"
_fast = True
fill_extra_cols = False
guessing = False
strict_names = False
def __init__(self, default_kwargs={}, **user_kwargs):
# Make sure user does not set header_start to None for a reader
# that expects a non-None value (i.e. a number >= 0). This mimics
# what happens in the Basic reader.
if (
default_kwargs.get("header_start", 0) is not None
and user_kwargs.get("header_start", 0) is None
):
raise ValueError("header_start cannot be set to None for this Reader")
# Set up kwargs and copy any user kwargs. Use deepcopy user kwargs
# since they may contain a dict item which would end up as a ref to the
# original and get munged later (e.g. in cparser.pyx validation of
# fast_reader dict).
kwargs = copy.deepcopy(default_kwargs)
kwargs.update(copy.deepcopy(user_kwargs))
delimiter = kwargs.pop("delimiter", " ")
self.delimiter = str(delimiter) if delimiter is not None else None
self.write_comment = kwargs.get("comment", "# ")
self.comment = kwargs.pop("comment", "#")
if self.comment is not None:
self.comment = str(self.comment)
self.quotechar = str(kwargs.pop("quotechar", '"'))
self.header_start = kwargs.pop("header_start", 0)
# If data_start is not specified, start reading
# data right after the header line
data_start_default = user_kwargs.get(
"data_start", self.header_start + 1 if self.header_start is not None else 1
)
self.data_start = kwargs.pop("data_start", data_start_default)
self.kwargs = kwargs
self.strip_whitespace_lines = True
self.strip_whitespace_fields = True
def _read_header(self):
# Use the tokenizer by default -- this method
# can be overridden for specialized headers
self.engine.read_header()
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
if self.comment is not None and len(self.comment) != 1:
raise core.ParameterError("The C reader does not support a comment regex")
elif self.data_start is None:
raise core.ParameterError(
"The C reader does not allow data_start to be None"
)
elif (
self.header_start is not None
and self.header_start < 0
and not isinstance(self, FastCommentedHeader)
):
raise core.ParameterError(
"The C reader does not allow header_start to be "
"negative except for commented-header files"
)
elif self.data_start < 0:
raise core.ParameterError(
"The C reader does not allow data_start to be negative"
)
elif len(self.delimiter) != 1:
raise core.ParameterError("The C reader only supports 1-char delimiters")
elif len(self.quotechar) != 1:
raise core.ParameterError(
"The C reader only supports a length-1 quote character"
)
elif "converters" in self.kwargs:
raise core.ParameterError(
"The C reader does not support passing specialized converters"
)
elif "encoding" in self.kwargs:
raise core.ParameterError(
"The C reader does not use the encoding parameter"
)
elif "Outputter" in self.kwargs:
raise core.ParameterError(
"The C reader does not use the Outputter parameter"
)
elif "Inputter" in self.kwargs:
raise core.ParameterError(
"The C reader does not use the Inputter parameter"
)
elif "data_Splitter" in self.kwargs or "header_Splitter" in self.kwargs:
raise core.ParameterError("The C reader does not use a Splitter class")
self.strict_names = self.kwargs.pop("strict_names", False)
# Process fast_reader kwarg, which may or may not exist (though ui.py will always
# pass this as a dict with at least 'enable' set).
fast_reader = self.kwargs.get("fast_reader", True)
if not isinstance(fast_reader, dict):
fast_reader = {}
fast_reader.pop("enable", None)
self.return_header_chars = fast_reader.pop("return_header_chars", False)
# Put fast_reader dict back into kwargs.
self.kwargs["fast_reader"] = fast_reader
self.engine = cparser.CParser(
table,
self.strip_whitespace_lines,
self.strip_whitespace_fields,
delimiter=self.delimiter,
header_start=self.header_start,
comment=self.comment,
quotechar=self.quotechar,
data_start=self.data_start,
fill_extra_cols=self.fill_extra_cols,
**self.kwargs,
)
conversion_info = self._read_header()
self.check_header()
if conversion_info is not None:
try_int, try_float, try_string = conversion_info
else:
try_int = {}
try_float = {}
try_string = {}
with _set_locale("C"):
data, comments = self.engine.read(try_int, try_float, try_string)
out = self.make_table(data, comments)
if self.return_header_chars:
out.meta["__ascii_fast_reader_header_chars__"] = self.engine.header_chars
return out
def make_table(self, data, comments):
"""Actually make the output table give the data and comments."""
meta = OrderedDict()
if comments:
meta["comments"] = comments
names = core._deduplicate_names(self.engine.get_names())
return Table(data, names=names, meta=meta)
def check_header(self):
names = self.engine.get_header_names() or self.engine.get_names()
if self.strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in names:
if (
core._is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads
):
raise ValueError(
f"Column name {name!r} does not meet strict name requirements"
)
# When guessing require at least two columns
if self.guessing and len(names) <= 1:
raise ValueError(
f"Table format guessing requires at least two columns, got {names}"
)
def write(self, table, output):
"""
Use a fast Cython method to write table data to output,
where output is a filename or file-like object.
"""
self._write(table, output, {})
def _write(
self, table, output, default_kwargs, header_output=True, output_types=False
):
# Fast writer supports only 1-d columns
core._check_multidim_table(table, max_ndim=1)
write_kwargs = {
"delimiter": self.delimiter,
"quotechar": self.quotechar,
"strip_whitespace": self.strip_whitespace_fields,
"comment": self.write_comment,
}
write_kwargs.update(default_kwargs)
# user kwargs take precedence over default kwargs
write_kwargs.update(self.kwargs)
writer = cparser.FastWriter(table, **write_kwargs)
writer.write(output, header_output, output_types)
class FastCsv(FastBasic):
"""
A faster version of the ordinary :class:`Csv` writer that uses the
optimized C parsing engine. Note that this reader will append empty
field values to the end of any row with not enough columns, while
:class:`FastBasic` simply raises an error.
"""
_format_name = "fast_csv"
_description = "Comma-separated values table using the fast C engine"
_fast = True
fill_extra_cols = True
def __init__(self, **kwargs):
super().__init__({"delimiter": ",", "comment": None}, **kwargs)
def write(self, table, output):
"""
Override the default write method of `FastBasic` to
output masked values as empty fields.
"""
self._write(table, output, {"fill_values": [(core.masked, "")]})
class FastTab(FastBasic):
"""
A faster version of the ordinary :class:`Tab` reader that uses
the optimized C parsing engine.
"""
_format_name = "fast_tab"
_description = "Tab-separated values table using the fast C engine"
_fast = True
def __init__(self, **kwargs):
super().__init__({"delimiter": "\t"}, **kwargs)
self.strip_whitespace_lines = False
self.strip_whitespace_fields = False
class FastNoHeader(FastBasic):
"""
This class uses the fast C engine to read tables with no header line. If
the names parameter is unspecified, the columns will be autonamed with
"col{}".
"""
_format_name = "fast_no_header"
_description = "Basic table with no headers using the fast C engine"
_fast = True
def __init__(self, **kwargs):
super().__init__({"header_start": None, "data_start": 0}, **kwargs)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` so
that columns names are not included in output.
"""
self._write(table, output, {}, header_output=None)
class FastCommentedHeader(FastBasic):
"""
A faster version of the :class:`CommentedHeader` reader, which looks for
column names in a commented line. ``header_start`` denotes the index of
the header line among all commented lines and is 0 by default.
"""
_format_name = "fast_commented_header"
_description = "Columns name in a commented line using the fast C engine"
_fast = True
def __init__(self, **kwargs):
super().__init__({}, **kwargs)
# Mimic CommentedHeader's behavior in which data_start
# is relative to header_start if unspecified; see #2692
if "data_start" not in kwargs:
self.data_start = 0
def make_table(self, data, comments):
"""
Actually make the output table given the data and comments. This is
slightly different from the base FastBasic method in the way comments
are handled.
"""
meta = OrderedDict()
if comments:
idx = self.header_start
if idx < 0:
idx = len(comments) + idx
meta["comments"] = comments[:idx] + comments[idx + 1 :]
if not meta["comments"]:
del meta["comments"]
names = core._deduplicate_names(self.engine.get_names())
return Table(data, names=names, meta=meta)
def _read_header(self):
tmp = self.engine.source
commented_lines = []
for line in tmp.splitlines():
line = line.lstrip()
if line and line[0] == self.comment: # line begins with a comment
commented_lines.append(line[1:])
if len(commented_lines) == self.header_start + 1:
break
if len(commented_lines) <= self.header_start:
raise cparser.CParserError("not enough commented lines")
self.engine.setup_tokenizer([commented_lines[self.header_start]])
self.engine.header_start = 0
self.engine.read_header()
self.engine.setup_tokenizer(tmp)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` so
that column names are commented.
"""
self._write(table, output, {}, header_output="comment")
class FastRdb(FastBasic):
"""
A faster version of the :class:`Rdb` reader. This format is similar to
tab-delimited, but it also contains a header line after the column
name line denoting the type of each column (N for numeric, S for string).
"""
_format_name = "fast_rdb"
_description = "Tab-separated with a type definition header line"
_fast = True
def __init__(self, **kwargs):
super().__init__({"delimiter": "\t", "data_start": 2}, **kwargs)
self.strip_whitespace_lines = False
self.strip_whitespace_fields = False
def _read_header(self):
tmp = self.engine.source
line1 = ""
line2 = ""
for line in tmp.splitlines():
# valid non-comment line
if not line1 and line.strip() and line.lstrip()[0] != self.comment:
line1 = line
elif not line2 and line.strip() and line.lstrip()[0] != self.comment:
line2 = line
break
else: # fewer than 2 lines in table
raise ValueError("RDB header requires 2 lines")
# Tokenize the two header lines separately.
# Each call to self.engine.read_header by default
# - calls _deduplicate_names to ensure unique header_names
# - sets self.names from self.header_names if not provided as kwarg
# - applies self.include_names/exclude_names to self.names.
# For parsing the types, disable steps 1 and 3, but self.names needs to be set.
self.engine.setup_tokenizer([line2])
self.engine.header_start = 0
self.engine.read_header(deduplicate=False, filter_names=False)
types = self.engine.get_header_names()
# If no kwarg names have been passed, reset to have column names read from header line 1.
if types == self.engine.get_names():
self.engine.set_names([])
self.engine.setup_tokenizer([line1])
# Get full list of column names prior to applying include/exclude_names,
# which have to be applied to the unique name set after deduplicate.
self.engine.read_header(deduplicate=True, filter_names=False)
col_names = self.engine.get_names()
self.engine.read_header(deduplicate=False)
if len(col_names) != len(types):
raise core.InconsistentTableError(
"RDB header mismatch between number of column names and column types"
)
# If columns have been removed via include/exclude_names, extract matching types.
if len(self.engine.get_names()) != len(types):
types = [types[col_names.index(n)] for n in self.engine.get_names()]
if any(not re.match(r"\d*(N|S)$", x, re.IGNORECASE) for x in types):
raise core.InconsistentTableError(
f"RDB type definitions do not all match [num](N|S): {types}"
)
try_int = {}
try_float = {}
try_string = {}
for name, col_type in zip(self.engine.get_names(), types):
if col_type[-1].lower() == "s":
try_int[name] = 0
try_float[name] = 0
try_string[name] = 1
else:
try_int[name] = 1
try_float[name] = 1
try_string[name] = 0
self.engine.setup_tokenizer(tmp)
return (try_int, try_float, try_string)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` to
output a line with column types after the column name line.
"""
self._write(table, output, {}, output_types=True)
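# Illustrative usage sketch (tab-delimited RDB input with the N/S type line);
# the inline table is an arbitrary assumption for the example:
#
#     from astropy.io import ascii
#     dat = ascii.read('a\tb\nN\tS\n1\tx', format='fast_rdb')
#     # Column 'a' is parsed as numeric, column 'b' as string.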
|
0af387450af0f63a3b7df613877697466955f22cb9cfd221d090108aca5f9738 | """A Collection of useful miscellaneous functions.
misc.py:
Collection of useful miscellaneous functions.
:Author: Hannes Breytenbach ([email protected])
"""
import collections.abc
import itertools
import operator
def first_true_index(iterable, pred=None, default=None):
"""find the first index position for the which the callable pred returns True"""
if pred is None:
func = operator.itemgetter(1)
else:
func = lambda x: pred(x[1])
# either index-item pair or default
ii = next(filter(func, enumerate(iterable)), default)
return ii[0] if ii else default
def first_false_index(iterable, pred=None, default=None):
"""find the first index position for the which the callable pred returns False"""
if pred is None:
func = operator.not_
else:
func = lambda x: not pred(x)
return first_true_index(iterable, func, default)
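# Hedged examples of the two index helpers above (values chosen arbitrarily):
#
#     first_true_index([0, 0, 3, 0])                     # -> 2, first truthy item
#     first_true_index([1, 2, 3], pred=lambda x: x > 2)  # -> 2
#     first_false_index([1, 1, 0, 1])                    # -> 2, first falsy item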
def sortmore(*args, **kw):
"""
Sorts any number of lists according to optionally given item sorting
key function(s) and/or a global sorting key function.
Parameters
----------
One or more lists
Keywords
--------
globalkey : None
revert to sorting by key function
globalkey : callable
Sort by evaluated value for all items in the lists
(the call signature of this function needs to be such that it accepts an
argument tuple of items, one from each list),
e.g. ``globalkey = lambda *l: sum(l)`` will order all the lists by the
sum of the items from each list.
key : None
sorting done by value of first input list
(in this case the objects in the first iterable need the comparison
methods __lt__ etc...)
key : callable
sorting done by value of key(item) for items in first iterable
key : tuple
sorting done by value of (key(item_0), ..., key(item_n)) for items in
the first n iterables (where n is the length of the key tuple)
i.e. the first callable is the primary sorting criterion, and the
rest act as tie-breakers.
Returns
-------
Sorted lists
Examples
--------
Capture sorting indices::
l = list('CharacterS')
In [1]: sortmore( l, range(len(l)) )
Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
[0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
In [2]: sortmore( l, range(len(l)), key=str.lower )
Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
[2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
"""
first = list(args[0])
if not len(first):
return args
globalkey = kw.get("globalkey")
key = kw.get("key")
if key is None:
if globalkey:
# if global sort function given and no local (secondary) key given, ==> no tiebreakers
key = lambda x: 0
else:
# if no global sort and no local sort keys given, sort by item values
key = lambda x: x
if globalkey is None:
globalkey = lambda *x: 0
if not isinstance(globalkey, collections.abc.Callable):
raise ValueError("globalkey needs to be callable")
if isinstance(key, collections.abc.Callable):
k = lambda x: (globalkey(*x), key(x[0]))
elif isinstance(key, tuple):
key = (k if k else lambda x: 0 for k in key)
k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
else:
raise KeyError(
"kw arg 'key' should be None, callable, or a sequence of callables, not {}".format(
type(key)
)
)
res = sorted(list(zip(*args)), key=k)
if "order" in kw:
if kw["order"].startswith(("descend", "reverse")):
res = reversed(res)
return tuple(map(list, zip(*res)))
def groupmore(func=None, *its):
"""Extends the itertools.groupby functionality to arbitrary number of iterators."""
if not func:
func = lambda x: x
its = sortmore(*its, key=func)
nfunc = lambda x: func(x[0])
zipper = itertools.groupby(zip(*its), nfunc)
unzipper = ((key, zip(*groups)) for key, groups in zipper)
return unzipper
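# Hedged example of groupmore (values chosen arbitrarily): group two parallel
# sequences by the parity of items in the first one.
#
#     for key, (nums, chars) in groupmore(lambda x: x % 2, [1, 2, 3, 4], 'abcd'):
#         print(key, list(nums), list(chars))
#     # 0 [2, 4] ['b', 'd']
#     # 1 [1, 3] ['a', 'c']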
|
3c1f7bb52b18f939c25f063de25c70b4aa05c76d31fe7bfde8806676983be154 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
An extensible ASCII table reader and writer.
Classes to read DAOphot table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import itertools as itt
import re
from collections import OrderedDict, defaultdict
import numpy as np
from . import core, fixedwidth
from .misc import first_false_index, first_true_index, groupmore
class DaophotHeader(core.BaseHeader):
"""
Read the header from a file produced by the IRAF DAOphot routine.
"""
comment = r"\s*#K"
# Regex for extracting the format strings
re_format = re.compile(r"%-?(\d+)\.?\d?[sdfg]")
re_header_keyword = re.compile(
r"[#]K" r"\s+ (?P<name> \w+)" r"\s* = (?P<stuff> .+) $", re.VERBOSE
)
aperture_values = ()
def __init__(self):
core.BaseHeader.__init__(self)
def parse_col_defs(self, grouped_lines_dict):
"""
Parse a series of column definition lines like below. There may be several
such blocks in a single file (where continuation characters have already been
stripped).
#N ID XCENTER YCENTER MAG MERR MSKY NITER
#U ## pixels pixels magnitudes magnitudes counts ##
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
"""
line_ids = ("#N", "#U", "#F")
coldef_dict = defaultdict(list)
# Function to strip identifier lines
stripper = lambda s: s[2:].strip(" \\")
for defblock in zip(*map(grouped_lines_dict.get, line_ids)):
for key, line in zip(line_ids, map(stripper, defblock)):
coldef_dict[key].append(line.split())
# Save the original columns so we can use it later to reconstruct the
# original header for writing
if self.data.is_multiline:
# Database contains multi-aperture data.
# Autogen column names, units, formats from last row of column headers
last_names, last_units, last_formats = list(
zip(*map(coldef_dict.get, line_ids))
)[-1]
N_multiline = len(self.data.first_block)
for i in np.arange(1, N_multiline + 1).astype("U2"):
# extra column names eg. RAPERT2, SUM2 etc...
extended_names = list(map("".join, zip(last_names, itt.repeat(i))))
if i == "1": # Enumerate the names starting at 1
coldef_dict["#N"][-1] = extended_names
else:
coldef_dict["#N"].append(extended_names)
coldef_dict["#U"].append(last_units)
coldef_dict["#F"].append(last_formats)
# Get column widths from column format specifiers
get_col_width = lambda s: int(self.re_format.search(s).groups()[0])
col_widths = [
[get_col_width(f) for f in formats] for formats in coldef_dict["#F"]
]
# original data format might be shorter than 80 characters and filled with spaces
row_widths = np.fromiter(map(sum, col_widths), int)
row_short = Daophot.table_width - row_widths
# fix last column widths
for w, r in zip(col_widths, row_short):
w[-1] += r
self.col_widths = col_widths
# merge the multi-line header data into single line data
coldef_dict = {k: sum(v, []) for (k, v) in coldef_dict.items()}
return coldef_dict
def update_meta(self, lines, meta):
"""
Extract table-level keywords for DAOphot table. These are indicated by
a leading '#K ' prefix.
"""
table_meta = meta["table"]
# self.lines = self.get_header_lines(lines)
Nlines = len(self.lines)
if Nlines > 0:
# Group the header lines according to their line identifiers (#K,
# #N, #U, #F or just # (spacer line)) function that grabs the line
# identifier
get_line_id = lambda s: s.split(None, 1)[0]
# Group lines by the line identifier ('#N', '#U', '#F', '#K') and
# capture line index
gid, groups = zip(*groupmore(get_line_id, self.lines, range(Nlines)))
# Groups of lines and their indices
grouped_lines, gix = zip(*groups)
# Dict of line groups keyed by line identifiers
grouped_lines_dict = dict(zip(gid, grouped_lines))
# Update the table_meta keywords if necessary
if "#K" in grouped_lines_dict:
keywords = OrderedDict(
map(self.extract_keyword_line, grouped_lines_dict["#K"])
)
table_meta["keywords"] = keywords
coldef_dict = self.parse_col_defs(grouped_lines_dict)
line_ids = ("#N", "#U", "#F")
for name, unit, fmt in zip(*map(coldef_dict.get, line_ids)):
meta["cols"][name] = {"unit": unit, "format": fmt}
self.meta = meta
self.names = coldef_dict["#N"]
def extract_keyword_line(self, line):
"""
Extract info from a header keyword line (#K)
"""
m = self.re_header_keyword.match(line)
if m:
vals = m.group("stuff").strip().rsplit(None, 2)
keyword_dict = {
"units": vals[-2],
"format": vals[-1],
"value": (vals[0] if len(vals) > 2 else ""),
}
return m.group("name"), keyword_dict
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a DAOphot
header. The DAOphot header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
Returns
-------
col : list
List of table Columns
"""
if not self.names:
raise core.InconsistentTableError("No column names found in DAOphot header")
# Create the list of io.ascii column objects
self._set_cols_from_names()
# Set unit and format as needed.
coldefs = self.meta["cols"]
for col in self.cols:
unit, fmt = map(coldefs[col.name].get, ("unit", "format"))
if unit != "##":
col.unit = unit
if fmt != "##":
col.format = fmt
# Set column start and end positions.
col_width = sum(self.col_widths, [])
ends = np.cumsum(col_width)
starts = ends - col_width
for i, col in enumerate(self.cols):
col.start, col.end = starts[i], ends[i]
col.span = col.end - col.start
if hasattr(col, "format"):
if any(x in col.format for x in "fg"):
col.type = core.FloatType
elif "d" in col.format:
col.type = core.IntType
elif "s" in col.format:
col.type = core.StrType
# INDEF is the missing value marker
self.data.fill_values.append(("INDEF", "0"))
class DaophotData(core.BaseData):
splitter_class = fixedwidth.FixedWidthSplitter
start_line = 0
comment = r"\s*#"
def __init__(self):
core.BaseData.__init__(self)
self.is_multiline = False
def get_data_lines(self, lines):
# Special case for multiline daophot databases. Extract the aperture
# values from the first multiline data block
if self.is_multiline:
# Grab the first column of the special block (aperture values) and
# recreate the aperture description string
aplist = next(zip(*map(str.split, self.first_block)))
self.header.aperture_values = tuple(map(float, aplist))
# Set self.data.data_lines to a slice of lines contain the data rows
core.BaseData.get_data_lines(self, lines)
class DaophotInputter(core.ContinuationLinesInputter):
continuation_char = "\\"
multiline_char = "*"
replace_char = " "
re_multiline = re.compile(r"(#?)[^\\*#]*(\*?)(\\*) ?$")
def search_multiline(self, lines, depth=150):
"""
Search lines for special continuation character to determine number of
continued rows in a datablock. For efficiency, depth gives the upper
limit of lines to search.
"""
# The list of apertures given in the #K APERTURES keyword may not be
# complete!! This happens if the string description of the aperture
# list is longer than the field width of the #K APERTURES field. In
# this case we have to figure out how many apertures there are based on
# the file structure.
comment, special, cont = zip(
*(self.re_multiline.search(line).groups() for line in lines[:depth])
)
# Find first non-comment line
data_start = first_false_index(comment)
# No data in lines[:depth]. This may be because there is no data in
# the file, or because the header is really huge. If the latter,
# increasing the search depth should help
if data_start is None:
return None, None, lines[:depth]
header_lines = lines[:data_start]
# Find first line ending on special row continuation character '*'
# indexed relative to data_start
first_special = first_true_index(special[data_start:depth])
if first_special is None: # no special lines
return None, None, header_lines
# last line ending on special '*', but not on line continuation '\\'
last_special = first_false_index(special[data_start + first_special : depth])
# index relative to first_special
# if first_special is None: #no end of special lines within search
# depth! increase search depth return self.search_multiline( lines,
# depth=2*depth )
# indexing now relative to line[0]
markers = np.cumsum([data_start, first_special, last_special])
# multiline portion of first data block
multiline_block = lines[markers[1] : markers[-1]]
return markers, multiline_block, header_lines
def process_lines(self, lines):
markers, block, header = self.search_multiline(lines)
self.data.is_multiline = markers is not None
self.data.markers = markers
self.data.first_block = block
# set the header lines returned by the search as an attribute of the header
self.data.header.lines = header
if markers is not None:
lines = lines[markers[0] :]
continuation_char = self.continuation_char
multiline_char = self.multiline_char
replace_char = self.replace_char
parts = []
outlines = []
for i, line in enumerate(lines):
mo = self.re_multiline.search(line)
if mo:
comment, special, cont = mo.groups()
if comment or cont:
line = line.replace(continuation_char, replace_char)
if special:
line = line.replace(multiline_char, replace_char)
if cont and not comment:
parts.append(line)
if not cont:
parts.append(line)
outlines.append("".join(parts))
parts = []
else:
raise core.InconsistentTableError(
f"multiline re could not match line {i}: {line}"
)
return outlines
class Daophot(core.BaseReader):
"""
DAOphot format table.
Example::
#K MERGERAD = INDEF scaleunit %-23.7g
#K IRAF = NOAO/IRAFV2.10EXPORT version %-23s
#K USER = davis name %-23s
#K HOST = tucana computer %-23s
#
#N ID XCENTER YCENTER MAG MERR MSKY NITER \\
#U ## pixels pixels magnitudes magnitudes counts ## \\
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
#
#N SHARPNESS CHI PIER PERROR \\
#U ## ## ## perrors \\
#F %-23.3f %-12.3f %-6d %-13s
#
14 138.538 INDEF 15.461 0.003 34.85955 4 \\
-0.032 0.802 0 No_error
The keywords defined in the #K records are available via the output table
``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/daophot.dat')
>>> data = ascii.read(filename)
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'], keyword['units'], keyword['format'])
...
MERGERAD INDEF scaleunit %-23.7g
IRAF NOAO/IRAFV2.10EXPORT version %-23s
USER name %-23s
...
The unit and formats are available in the output table columns::
>>> for colname in data.colnames:
... col = data[colname]
... print(colname, col.unit, col.format)
...
ID None %-9d
XCENTER pixels %-10.3f
YCENTER pixels %-10.3f
...
Any column values of INDEF are interpreted as a missing value and will be
masked out in the resultant table.
In case of multi-aperture daophot files containing repeated entries for the last
row of fields, extra unique column names will be created by suffixing
corresponding field names with numbers starting from 2 to N (where N is the
total number of apertures).
For example,
first aperture radius will be RAPERT and corresponding magnitude will be MAG,
second aperture radius will be RAPERT2 and corresponding magnitude will be MAG2,
third aperture radius will be RAPERT3 and corresponding magnitude will be MAG3,
and so on.
"""
_format_name = "daophot"
_io_registry_format_aliases = ["daophot"]
_io_registry_can_write = False
_description = "IRAF DAOphot format table"
header_class = DaophotHeader
data_class = DaophotData
inputter_class = DaophotInputter
table_width = 80
def __init__(self):
core.BaseReader.__init__(self)
# The inputter needs to know about the data (see DaophotInputter.process_lines)
self.inputter.data = self.data
def write(self, table=None):
raise NotImplementedError
|