hash | content
---|---
68fc557d042fb272459d4f7f0b4be74f3dc1f6bedb9de3d27809a71762fe8dde | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import BaseCoordinateFrame
__all__ = [
"select_step_degree",
"select_step_hour",
"select_step_scalar",
"transform_contour_set_inplace",
]
def select_step_degree(dv):
# Modified from axis_artist, supports astropy.units
if dv > 1.0 * u.arcsec:
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_units = [u.degree] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.0
minute_units = [u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.0
second_units = [u.arcsec] * len(second_limits_)
degree_limits = np.concatenate([second_limits_, minute_limits_, degree_limits_])
degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_
degree_units = second_units + minute_units + degree_units
n = degree_limits.searchsorted(dv.to(u.degree))
step = degree_steps[n]
unit = degree_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec
def select_step_hour(dv):
if dv > 15.0 * u.arcsec:
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]
hour_units = [u.hourangle] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.0
minute_units = [15.0 * u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.0
second_units = [15.0 * u.arcsec] * len(second_limits_)
hour_limits = np.concatenate([second_limits_, minute_limits_, hour_limits_])
hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_
hour_units = second_units + minute_units + hour_units
n = hour_limits.searchsorted(dv.to(u.hourangle))
step = hour_steps[n]
unit = hour_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(15.0 * u.arcsec)) * (15.0 * u.arcsec)
def select_step_scalar(dv):
log10_dv = np.log10(dv)
base = np.floor(log10_dv)
frac = log10_dv - base
steps = np.log10([1, 2, 5, 10])
imin = np.argmin(np.abs(frac - steps))
return 10.0 ** (base + steps[imin])
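# --- Editor's illustrative sketch (not part of the original module). ---
# A minimal, hypothetical demonstration of how the step selectors above behave
# for a few representative spacings; the expected values follow from the tables
# defined in select_step_degree/select_step_hour.
def _demo_step_selection():  # hypothetical helper, never called by this module
    # Scalar steps snap to the nearest 1, 2, 5 or 10 times a power of ten.
    assert np.isclose(select_step_scalar(0.033), 0.05)
    # Angular steps snap to "nice" sexagesimal values and carry units.
    assert select_step_degree(1.8 * u.arcmin) == 2 * u.arcmin
    # In hour mode, one minute of hour angle corresponds to 15 arcmin.
    assert select_step_hour(40 * u.arcmin) == 45 * u.arcmin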
def get_coord_meta(frame):
coord_meta = {}
coord_meta["type"] = ("longitude", "latitude")
coord_meta["wrap"] = (None, None)
coord_meta["unit"] = (u.deg, u.deg)
from astropy.coordinates import frame_transform_graph
if isinstance(frame, str):
initial_frame = frame
frame = frame_transform_graph.lookup_name(frame)
if frame is None:
raise ValueError(f"Unknown frame: {initial_frame}")
if not isinstance(frame, BaseCoordinateFrame):
frame = frame()
names = list(frame.representation_component_names.keys())
coord_meta["name"] = names[:2]
return coord_meta
def transform_contour_set_inplace(cset, transform):
"""
Transform a contour set in-place using a specified
    :class:`matplotlib.transforms.Transform`.
Using transforms with the native Matplotlib contour/contourf can be slow if
the transforms have a non-negligible overhead (which is the case for
WCS/SkyCoord transforms) since the transform is called for each individual
contour line. It is more efficient to stack all the contour lines together
temporarily and transform them in one go.
"""
# The contours are represented as paths grouped into levels. Each can have
# one or more paths. The approach we take here is to stack the vertices of
# all paths and transform them in one go. The pos_level list helps us keep
# track of where the set of segments for each overall contour level ends.
    # The pos_segments list helps us keep track of where each segment ends for
# each contour level.
all_paths = []
pos_level = []
pos_segments = []
for collection in cset.collections:
paths = collection.get_paths()
if len(paths) == 0:
continue
all_paths.append(paths)
        # The last item in pos isn't needed for np.split and in fact causes
        # issues if we keep it, because it would result in an extra empty
        # array being returned.
pos = np.cumsum([len(x) for x in paths])
pos_segments.append(pos[:-1])
pos_level.append(pos[-1])
# As above the last item isn't needed
pos_level = np.cumsum(pos_level)[:-1]
# Stack all the segments into a single (n, 2) array
vertices = [path.vertices for paths in all_paths for path in paths]
if len(vertices) > 0:
vertices = np.concatenate(vertices)
else:
return
# Transform all coordinates in one go
vertices = transform.transform(vertices)
# Split up into levels again
vertices = np.split(vertices, pos_level)
# Now re-populate the segments in the line collections
for ilevel, vert in enumerate(vertices):
vert = np.split(vert, pos_segments[ilevel])
for iseg, ivert in enumerate(vert):
all_paths[ilevel][iseg].vertices = ivert
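# --- Editor's illustrative sketch (not part of the original module). ---
# A minimal, hypothetical example of applying transform_contour_set_inplace to
# a freshly drawn contour set with a plain Affine2D shift; WCSAxes passes its
# WCS-based transforms here instead.  Assumes a Matplotlib version in which
# contour sets still expose ``.collections`` (as the code above requires).
def _demo_transform_contour_set():  # hypothetical helper, never called here
    import matplotlib.pyplot as plt
    from matplotlib.transforms import Affine2D
    fig, ax = plt.subplots()
    cset = ax.contour(np.arange(16).reshape(4, 4))
    # Shift every contour vertex by (1, 2) in a single vectorized call.
    transform_contour_set_inplace(cset, Affine2D().translate(1, 2))
    plt.close(fig)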
|
021befb5706ca75a7e6c73e91b07b33de9f318956a47f600636661afb93fcac0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy import units as u
# Algorithm inspired by PGSBOX from WCSLIB by M. Calabretta
LONLAT = {"longitude", "latitude"}
def wrap_180(values):
values_new = values % 360.0
with np.errstate(invalid="ignore"):
values_new[values_new > 180.0] -= 360
return values_new
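# --- Editor's illustrative sketch (not part of the original module). ---
# A minimal, hypothetical illustration of wrap_180, which maps angles into the
# (-180, 180] degree range while leaving NaNs untouched.
def _demo_wrap_180():  # hypothetical helper, never called by this module
    wrapped = wrap_180(np.array([10.0, 350.0, 540.0, np.nan]))
    assert np.allclose(wrapped[:3], [10.0, -10.0, 180.0])
    assert np.isnan(wrapped[3])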
def find_coordinate_range(transform, extent, coord_types, coord_units, coord_wraps):
"""
Find the range of coordinates to use for ticks/grids.
Parameters
----------
    transform : `~matplotlib.transforms.Transform` or similar
        An object with a ``transform`` method that converts pixel to world
        coordinates: it is called with an ``(n, 2)`` array of pixel positions
        and should return the corresponding world coordinates.
extent : iterable
The range of the image viewport in pixel coordinates, given as [xmin,
xmax, ymin, ymax].
coord_types : list of str
Whether each coordinate is a ``'longitude'``, ``'latitude'``, or
``'scalar'`` value.
coord_units : list of `astropy.units.Unit`
The units for each coordinate.
coord_wraps : list of `astropy.units.Quantity`
The wrap angles for longitudes.
"""
    # Sample coordinates on an NX x NY grid.
from . import conf
if len(extent) == 4:
nx = ny = conf.coordinate_range_samples
x = np.linspace(extent[0], extent[1], nx + 1)
y = np.linspace(extent[2], extent[3], ny + 1)
xp, yp = np.meshgrid(x, y)
with np.errstate(invalid="ignore"):
world = transform.transform(np.vstack([xp.ravel(), yp.ravel()]).transpose())
else:
nx = conf.coordinate_range_samples
xp = np.linspace(extent[0], extent[1], nx + 1)[None]
with np.errstate(invalid="ignore"):
world = transform.transform(xp.T)
ranges = []
for coord_index, coord_type in enumerate(coord_types):
xw = world[:, coord_index].reshape(xp.shape)
if coord_type in LONLAT:
unit = coord_units[coord_index]
xw = xw * unit.to(u.deg)
# Iron out coordinates along first row
wjump = xw[0, 1:] - xw[0, :-1]
with np.errstate(invalid="ignore"):
reset = np.abs(wjump) > 180.0
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.0
wjump = 360.0 * np.trunc(wjump / 360.0)
xw[0, 1:][reset] -= wjump[reset]
# Now iron out coordinates along all columns, starting with first row.
wjump = xw[1:] - xw[:1]
with np.errstate(invalid="ignore"):
reset = np.abs(wjump) > 180.0
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.0
wjump = 360.0 * np.trunc(wjump / 360.0)
xw[1:][reset] -= wjump[reset]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min = np.nanmin(xw)
xw_max = np.nanmax(xw)
# Check if range is smaller when normalizing to the range 0 to 360
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(xw % 360.0)
xw_max_check = np.nanmax(xw % 360.0)
if xw_max_check - xw_min_check <= xw_max - xw_min < 360.0:
xw_min = xw_min_check
xw_max = xw_max_check
# Check if range is smaller when normalizing to the range -180 to 180
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(wrap_180(xw))
xw_max_check = np.nanmax(wrap_180(xw))
if (
xw_max_check - xw_min_check < 360.0
and xw_max - xw_min >= xw_max_check - xw_min_check
):
xw_min = xw_min_check
xw_max = xw_max_check
x_range = xw_max - xw_min
if coord_type == "longitude":
if x_range > 300.0:
xw_min = coord_wraps[coord_index].to_value(u.deg) - 360
xw_max = coord_wraps[coord_index].to_value(u.deg) - np.spacing(360.0)
elif xw_min < 0.0:
xw_min = max(-180.0, xw_min - 0.1 * x_range)
xw_max = min(+180.0, xw_max + 0.1 * x_range)
else:
xw_min = max(0.0, xw_min - 0.1 * x_range)
xw_max = min(360.0, xw_max + 0.1 * x_range)
elif coord_type == "latitude":
xw_min = max(-90.0, xw_min - 0.1 * x_range)
xw_max = min(+90.0, xw_max + 0.1 * x_range)
if coord_type in LONLAT:
xw_min *= u.deg.to(unit)
xw_max *= u.deg.to(unit)
ranges.append((xw_min, xw_max))
return ranges
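# --- Editor's illustrative sketch (not part of the original module). ---
# A minimal, hypothetical call of find_coordinate_range using an identity
# Affine2D transform, i.e. pixel coordinates that are already degrees.  The
# extent and axis types are made up purely for illustration.
def _demo_find_coordinate_range():  # hypothetical helper, never called here
    from matplotlib.transforms import Affine2D
    ranges = find_coordinate_range(
        Affine2D(),
        extent=[0, 10, 0, 10],
        coord_types=["longitude", "latitude"],
        coord_units=[u.deg, u.deg],
        coord_wraps=[360 * u.deg, None],
    )
    # The returned ranges are padded by 10% and clipped to valid angles.
    (lon_min, lon_max), (lat_min, lat_max) = ranges
    assert 0 <= lon_min <= lon_max <= 360
    assert -90 <= lat_min <= lat_max <= 90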
|
aab15529825f6b9786d2b8f762a70cc6ef4e3f90c3f9a8c58f6650b2f04424af | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
from collections import OrderedDict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
__all__ = [
"RectangularFrame1D",
"Spine",
"BaseFrame",
"RectangularFrame",
"EllipticalFrame",
]
class Spine:
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
Parameters
----------
parent_axes : `~astropy.visualization.wcsaxes.WCSAxes`
The parent axes
transform : `~matplotlib.transforms.Transform`
The transform from data to world
data_func : callable
If not ``None``, it should be a function that returns the appropriate spine
data when called with this object as the sole argument. If ``None``, the
spine data must be manually updated in ``update_spines()``.
"""
def __init__(self, parent_axes, transform, *, data_func=None):
self.parent_axes = parent_axes
self.transform = transform
self.data_func = data_func
self._data = None
self._pixel = None
self._world = None
@property
def data(self):
if self._data is None and self.data_func:
self.data = self.data_func(self)
return self._data
@data.setter
def data(self, value):
self._data = value
if value is None:
self._pixel = None
self._world = None
else:
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid="ignore"):
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
self._pixel = value
if value is None:
self._data = None
self._world = None
else:
            # Convert the newly set pixel coordinates back to data coordinates.
            self._data = self.parent_axes.transData.inverted().transform(self._pixel)
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def world(self):
return self._world
@world.setter
def world(self, value):
self._world = value
if value is None:
self._data = None
self._pixel = None
else:
            # The stored transform goes data -> world, so invert it here.
            self._data = self.transform.inverted().transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._update_normal()
def _update_normal(self):
        # Find the angle normal to the border, pointing inwards, in display coordinates
dx = self.pixel[1:, 0] - self.pixel[:-1, 0]
dy = self.pixel[1:, 1] - self.pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
def _halfway_x_y_angle(self):
"""
Return the x, y, normal_angle values halfway along the spine.
"""
x_disp, y_disp = self.pixel[:, 0], self.pixel[:, 1]
# Get distance along the path
d = np.hstack(
[0.0, np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))]
)
xcen = np.interp(d[-1] / 2.0, d, x_disp)
ycen = np.interp(d[-1] / 2.0, d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.0) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self.normal_angle[imin] + 180.0
return xcen, ycen, normal_angle
class SpineXAligned(Spine):
"""
A single side of an axes, aligned with the X data axis.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
if value is None:
self._pixel = None
self._world = None
else:
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid="ignore"):
self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
self._pixel = value
if value is None:
self._data = None
self._world = None
else:
            self._data = self.parent_axes.transData.inverted().transform(self._pixel)
self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
class BaseFrame(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
spine_class = Spine
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams["axes.linewidth"]
self._color = rcParams["axes.edgecolor"]
self._path = path
for axis in self.spine_names:
self[axis] = self.spine_class(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return "lower" if ymin < ymax else "upper"
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self.spine_names:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(
self._path,
transform=self.parent_axes.transData,
facecolor=rcParams["axes.facecolor"],
edgecolor="white",
)
def draw(self, renderer):
for axis in self.spine_names:
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(
x, y, linewidth=self._linewidth, color=self._color, zorder=1000
)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
spines[axis] = self.spine_class(self.parent_axes, self.transform)
if data.size > 0:
p = np.linspace(0.0, 1.0, data.shape[0])
p_new = np.linspace(0.0, 1.0, n_samples)
spines[axis].data = np.array(
[np.interp(p_new, p, d) for d in data.T]
).transpose()
else:
spines[axis].data = data
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : str
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
def update_spines(self):
for spine in self.values():
if spine.data_func:
spine.data = spine.data_func(spine)
class RectangularFrame1D(BaseFrame):
"""
    A classic rectangular frame for one-dimensional (line plot) axes.
"""
spine_names = "bt"
spine_class = SpineXAligned
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self["b"].data = np.array(([xmin, ymin], [xmax, ymin]))
self["t"].data = np.array(([xmax, ymax], [xmin, ymax]))
super().update_spines()
def _update_patch_path(self):
self.update_spines()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
line = Line2D(
x,
y,
linewidth=self._linewidth,
color=self._color,
zorder=1000,
transform=self.parent_axes.transData,
)
line.draw(renderer)
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = "brtl"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self["b"].data = np.array(([xmin, ymin], [xmax, ymin]))
self["r"].data = np.array(([xmax, ymin], [xmax, ymax]))
self["t"].data = np.array(([xmax, ymax], [xmin, ymax]))
self["l"].data = np.array(([xmin, ymax], [xmin, ymin]))
super().update_spines()
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = "chv"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0.0, 2 * np.pi, 1000)
self["c"].data = np.array(
[xmid + dx * np.cos(theta), ymid + dy * np.sin(theta)]
).transpose()
self["h"].data = np.array(
[np.linspace(xmin, xmax, 1000), np.repeat(ymid, 1000)]
).transpose()
self["v"].data = np.array(
[np.repeat(xmid, 1000), np.linspace(ymin, ymax, 1000)]
).transpose()
super().update_spines()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle.
"""
self.update_spines()
vertices = self["c"].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn.
"""
axis = "c"
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
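# --- Editor's illustrative sketch (not part of the original module). ---
# A minimal, hypothetical example of selecting a frame class when creating a
# WCSAxes and styling the frame afterwards.  It relies on the public
# ``frame_class`` keyword and the ``ax.coords.frame`` attribute used elsewhere
# in this package; the all-sky WCS below is made up for illustration.
def _demo_elliptical_frame():  # hypothetical helper, never called by the module
    import matplotlib.pyplot as plt
    from astropy.wcs import WCS
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["RA---MOL", "DEC--MOL"]
    ax = plt.subplot(projection=wcs, frame_class=EllipticalFrame)
    ax.coords.frame.set_color("gray")  # BaseFrame.set_color
    ax.coords.frame.set_linewidth(2)   # BaseFrame.set_linewidth
    plt.close(ax.figure)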
|
8ff89f49e94a50f9a9c7be93a1d6e3eb12297517ce04d4531e1d0a981de18d50 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.lines import Path
from astropy.coordinates.angle_utilities import angular_separation
# Tolerance for WCS round-tripping, relative to the scale size
ROUND_TRIP_RTOL = 1.0
# Tolerance for discontinuities relative to the median
DISCONT_FACTOR = 10.0
def get_lon_lat_path(lon_lat, pixel, lon_lat_check):
"""
    Build a Matplotlib path for a curve, taking into account discontinuities.
Parameters
----------
lon_lat : ndarray
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : ndarray
The pixel coordinates corresponding to ``lon_lat``
lon_lat_check : ndarray
        The world coordinates derived by converting ``pixel`` back to world
        coordinates, used to check that the transformation round-trips.
"""
# In some spherical projections, some parts of the curve are 'behind' or
# 'in front of' the plane of the image, so we find those by reversing the
# transformation and finding points where the result is not consistent.
sep = angular_separation(
np.radians(lon_lat[:, 0]),
np.radians(lon_lat[:, 1]),
np.radians(lon_lat_check[:, 0]),
np.radians(lon_lat_check[:, 1]),
)
# Define the relevant scale size using the separation between the first two points
scale_size = angular_separation(
*np.radians(lon_lat[0, :]), *np.radians(lon_lat[1, :])
)
with np.errstate(invalid="ignore"):
sep[sep > np.pi] -= 2.0 * np.pi
        mask = np.abs(sep) > ROUND_TRIP_RTOL * scale_size
# Mask values with invalid pixel positions
mask = mask | np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(lon_lat.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# We start off by pre-computing the step in pixel coordinates from one
# point to the next. The idea is to look for large jumps that might indicate
# discontinuities.
step = np.sqrt(
(pixel[1:, 0] - pixel[:-1, 0]) ** 2 + (pixel[1:, 1] - pixel[:-1, 1]) ** 2
)
    # We search for discontinuities by looking for places where the step
    # is larger by more than a given factor compared to the previous step
# discontinuous = step > DISCONT_FACTOR * np.median(step)
discontinuous = step[1:] > DISCONT_FACTOR * step[:-1]
# Skip over discontinuities
codes[2:][discontinuous] = Path.MOVETO
# The above missed the first step, so check that too
if step[0] > DISCONT_FACTOR * step[1]:
codes[1] = Path.MOVETO
# Create the path
path = Path(pixel, codes=codes)
return path
def get_gridline_path(world, pixel):
"""
    Build a Matplotlib path for a grid line.
Parameters
----------
world : ndarray
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : ndarray
        The pixel coordinates corresponding to ``world``
"""
# Mask values with invalid pixel positions
mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(world.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
    # Unlike get_lon_lat_path, no further discontinuity detection is needed
    # here; the NaN masking above already inserts the required MOVETO codes.
# Create the path
path = Path(pixel, codes=codes)
return path
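# --- Editor's illustrative sketch (not part of the original module). ---
# A minimal, hypothetical illustration of how NaN pixel positions translate
# into MOVETO codes so that hidden vertices break the drawn grid line.
def _demo_gridline_path():  # hypothetical helper, never called by this module
    world = np.array([[0.0, 0.0], [10.0, 0.0], [20.0, 0.0], [30.0, 0.0]])
    pixel = np.array([[0.0, 0.0], [1.0, 0.0], [np.nan, np.nan], [3.0, 0.0]])
    path = get_gridline_path(world, pixel)
    # MOVETO at the first vertex, at the NaN vertex, and at the one after it.
    expected = [Path.MOVETO, Path.LINETO, Path.MOVETO, Path.MOVETO]
    assert np.array_equal(path.codes, expected)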
|
81c0ad57cfbb975a221aada1cade6a5e59c62b61342fda65d1ec08df376136a5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from contextlib import nullcontext
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.contour import QuadContourSet
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.frame import (
EllipticalFrame,
RectangularFrame,
RectangularFrame1D,
)
from astropy.visualization.wcsaxes.transforms import CurvedTransform
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.wcs import WCS
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS
ft_version = Version(matplotlib.ft2font.__freetype_version__)
FREETYPE_261 = ft_version == Version("2.6.1")
# We cannot use matplotlib.checkdep_usetex() anymore, see
# https://github.com/matplotlib/matplotlib/issues/23244
TEX_UNAVAILABLE = True
# Compare against 3.6.99 so that Matplotlib 3.7 pre-releases also count as 3.7.
MATPLOTLIB_LT_3_7 = Version(matplotlib.__version__) < Version("3.6.99")
def teardown_function(function):
plt.close("all")
def test_grid_regression(ignore_matplotlibrc):
# Regression test for a bug that meant that if the rc parameter
# axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc("axes", grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
def test_format_coord_regression(ignore_matplotlibrc, tmp_path):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
TARGET_HEADER = fits.Header.fromstring(
"""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""",
sep="\n",
)
@pytest.mark.parametrize("grid_type", ["lines", "contours"])
def test_no_numpy_warnings(ignore_matplotlibrc, tmp_path, grid_type):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color="white", grid_type=grid_type)
# There should be no warnings raised if some pixels are outside WCS
# (since this is normal).
# BUT our own catch_warning was ignoring some warnings before, so now we
# have to catch it. Otherwise, the pytest filterwarnings=error
# setting in setup.cfg will fail this test.
# There are actually multiple warnings but they are all similar.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=r".*converting a masked element to nan.*"
)
warnings.filterwarnings(
"ignore", message=r".*No contour levels were found within the data range.*"
)
warnings.filterwarnings(
"ignore", message=r".*np\.asscalar\(a\) is deprecated since NumPy v1\.16.*"
)
warnings.filterwarnings(
"ignore", message=r".*PY_SSIZE_T_CLEAN will be required.*"
)
fig.savefig(tmp_path / "test.png")
def test_invalid_frame_overlay(ignore_matplotlibrc):
    # Make sure a clear error is raised if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError, match=r"Frame banana not found"):
ax.get_coords_overlay("banana")
with pytest.raises(ValueError, match=r"Unknown frame: banana"):
get_coord_meta("banana")
def test_plot_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, "o", transform=ax.get_transform("galactic"))
def test_scatter_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
with pytest.raises(TypeError):
ax.scatter_coord(c, marker="o", transform=ax.get_transform("galactic"))
def test_set_label_properties(ignore_matplotlibrc):
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel("Test x label", labelpad=2, color="red")
ax.set_ylabel("Test y label", labelpad=3, color="green")
assert ax.coords[0].axislabels.get_text() == "Test x label"
assert ax.coords[0].axislabels.get_minpad("b") == 2
assert ax.coords[0].axislabels.get_color() == "red"
assert ax.coords[1].axislabels.get_text() == "Test y label"
assert ax.coords[1].axislabels.get_minpad("l") == 3
assert ax.coords[1].axislabels.get_color() == "green"
assert ax.get_xlabel() == "Test x label"
assert ax.get_ylabel() == "Test y label"
GAL_HEADER = fits.Header.fromstring(
"""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""",
sep="\n",
)
def test_slicing_warnings(ignore_matplotlibrc, tmp_path):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
wcs3d.wcs.cunit = ["deg", "deg", "km/s"]
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0.0, 0.0, 1.0]
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings("ignore", message=r".*PY_SSIZE_T_CLEAN.*")
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
plt.savefig(tmp_path / "test.png")
# Angle case
wcs3d = WCS(GAL_HEADER)
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings("ignore", message=r".*PY_SSIZE_T_CLEAN.*")
plt.clf()
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 2))
plt.savefig(tmp_path / "test.png")
def test_plt_xlabel_ylabel(tmp_path):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel("Galactic Longitude")
plt.ylabel("Galactic Latitude")
plt.savefig(tmp_path / "test.png")
def test_grid_type_contours_transform(tmp_path):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {
"type": ("scalar", "scalar"),
"unit": (u.m, u.s),
"wrap": (None, None),
"name": ("x", "y"),
}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type="contours")
fig.savefig(tmp_path / "test.png")
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmp_path):
# Regression test for a bug that caused a crash when grid was called and
# didn't produce grid lines (due e.g. to too large spacing) and was then
# called again.
filename = tmp_path / "test.png"
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type="contours")
plt.savefig(filename)
ax.coords[0].grid(grid_type="contours")
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform("world"))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform("world"))
assert isinstance(cset, QuadContourSet)
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
if MATPLOTLIB_LT_3_7:
ctx = pytest.warns(
UserWarning, match="No contour levels were found within the data range"
)
else:
ctx = nullcontext()
with ctx:
ax.contour(np.zeros((4, 4)), transform=ax.get_transform("world"))
def test_iterate_coords(ignore_matplotlibrc):
# Regression test for a bug that caused ax.coords to return too few axes
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
wcs3d.wcs.cunit = ["deg", "deg", "km/s"]
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0.0, 0.0, 1.0]
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
x, y, z = ax.coords
def test_invalid_slices_errors(ignore_matplotlibrc):
# Make sure that users get a clear message when specifying a WCS with
# >2 dimensions without giving the 'slices' argument, or if the 'slices'
# argument has too many/few elements.
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
with pytest.raises(
ValueError,
match=r"WCS has more than 2 pixel dimensions, so 'slices' should be set",
):
plt.subplot(1, 1, 1, projection=wcs3d)
with pytest.raises(
ValueError,
match=(
r"'slices' should have as many elements as WCS has pixel dimensions .should"
r" be 3."
),
):
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1, 2))
wcs2d = WCS(naxis=2)
wcs2d.wcs.ctype = ["x", "y"]
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d)
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=("x", "y"))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=("y", "x"))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=["x", "y"])
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, "x"))
assert ax.frame_class is RectangularFrame1D
wcs1d = WCS(naxis=1)
wcs1d.wcs.ctype = ["x"]
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs1d)
assert ax.frame_class is RectangularFrame1D
with pytest.raises(ValueError):
plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, "y"))
EXPECTED_REPR_1 = """
<CoordinatesMap with 3 world coordinates:
index aliases type ... wrap format_unit visible
----- ------------------------------ --------- ... --------- ----------- -------
0 distmod dist scalar ... None no
1 pos.galactic.lon glon-car glon longitude ... 360.0 deg deg yes
2 pos.galactic.lat glat-car glat latitude ... None deg yes
>
""".strip()
EXPECTED_REPR_2 = """
<CoordinatesMap with 3 world coordinates:
index aliases type ... wrap format_unit visible
----- ------------------------------ --------- ... --------- ----------- -------
0 distmod dist scalar ... None yes
1 pos.galactic.lon glon-car glon longitude ... 360.0 deg deg yes
2 pos.galactic.lat glat-car glat latitude ... None deg yes
>
""".strip()
def test_repr(ignore_matplotlibrc):
# Unit test to make sure __repr__ looks as expected
wcs3d = WCS(GAL_HEADER)
# Cube header has world coordinates as distance, lon, lat, so start off
# by slicing in a way that we select just lon,lat:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, "x", "y"))
assert repr(ax.coords) == EXPECTED_REPR_1
# Now slice in a way that all world coordinates are still present:
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
assert repr(ax.coords) == EXPECTED_REPR_2
@pytest.fixture
def time_spectral_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["FREQ", "TIME"]
wcs.wcs.set()
return wcs
def test_time_wcs(time_spectral_wcs_2d):
# Regression test for a bug that caused WCSAxes to error when using a WCS
# with a time axis.
plt.subplot(projection=time_spectral_wcs_2d)
@pytest.mark.skipif(TEX_UNAVAILABLE, reason="TeX is unavailable")
def test_simplify_labels_usetex(ignore_matplotlibrc, tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/8004."""
plt.rc("text", usetex=True)
header = {
"NAXIS": 2,
"NAXIS1": 360,
"NAXIS2": 180,
"CRPIX1": 180.5,
"CRPIX2": 90.5,
"CRVAL1": 180.0,
"CRVAL2": 0.0,
"CDELT1": -2 * np.sqrt(2) / np.pi,
"CDELT2": 2 * np.sqrt(2) / np.pi,
"CTYPE1": "RA---MOL",
"CTYPE2": "DEC--MOL",
"RADESYS": "ICRS",
}
wcs = WCS(header)
fig, ax = plt.subplots(subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))
ax.set_xlim(-0.5, header["NAXIS1"] - 0.5)
ax.set_ylim(-0.5, header["NAXIS2"] - 0.5)
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[0].set_ticks(spacing=45 * u.deg)
ax.coords[1].set_ticks(spacing=30 * u.deg)
ax.grid()
fig.savefig(tmp_path / "plot.png")
@pytest.mark.parametrize("frame_class", [RectangularFrame, EllipticalFrame])
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
"""Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a
WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435.
"""
labels = ["RA", "Declination"]
header = {
"NAXIS": 2,
"NAXIS1": 360,
"NAXIS2": 180,
"CRPIX1": 180.5,
"CRPIX2": 90.5,
"CRVAL1": 180.0,
"CRVAL2": 0.0,
"CDELT1": -2 * np.sqrt(2) / np.pi,
"CDELT2": 2 * np.sqrt(2) / np.pi,
"CTYPE1": "RA---AIT",
"CTYPE2": "DEC--AIT",
}
wcs = WCS(header)
fig, ax = plt.subplots(subplot_kw=dict(frame_class=frame_class, projection=wcs))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
assert ax.get_xlabel() == labels[0]
assert ax.get_ylabel() == labels[1]
for i in range(2):
assert ax.coords[i].get_axislabel() == labels[i]
@pytest.mark.parametrize("atol", [0.2, 1.0e-8])
def test_bbox_size(atol):
# Test for the size of a WCSAxes bbox (only have Matplotlib >= 3.0 now)
extents = [11.38888888888889, 3.5, 576.0, 432.0]
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
fig.canvas.draw()
renderer = fig.canvas.renderer
ax_bbox = ax.get_tightbbox(renderer)
# Enforce strict test only with reference Freetype version
if atol < 0.1 and not FREETYPE_261:
pytest.xfail(
"Exact BoundingBox dimensions are only ensured with FreeType 2.6.1"
)
assert np.allclose(ax_bbox.extents, extents, atol=atol)
def test_wcs_type_transform_regression():
wcs = WCS(TARGET_HEADER)
sliced_wcs = SlicedLowLevelWCS(wcs, np.s_[1:-1, 1:-1])
ax = plt.subplot(1, 1, 1, projection=wcs)
ax.get_transform(sliced_wcs)
high_wcs = HighLevelWCSWrapper(sliced_wcs)
    ax.get_transform(high_wcs)
def test_multiple_draws_grid_contours(tmp_path):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
ax.grid(color="black", grid_type="contours")
fig.savefig(tmp_path / "plot.png")
fig.savefig(tmp_path / "plot.png")
def test_get_coord_range_nan_regression():
# Test to make sure there is no internal casting of NaN to integers
# NumPy 1.24 raises a RuntimeWarning if a NaN is cast to an integer
wcs = WCS(TARGET_HEADER)
wcs.wcs.crval[0] = 0 # Re-position the longitude wrap to the middle
ax = plt.subplot(1, 1, 1, projection=wcs)
# Set the Y limits within valid latitudes/declinations
ax.set_ylim(300, 500)
# Set the X limits within valid longitudes/RAs, so the world coordinates have no NaNs
ax.set_xlim(300, 700)
assert np.allclose(
ax.coords.get_coord_range(),
np.array(
[
(-123.5219272110385, 122.49684897692201),
(-44.02289164685554, 44.80732766607591),
]
),
)
# Extend the X limits to include invalid longitudes/RAs, so the world coordinates have NaNs
ax.set_xlim(0, 700)
assert np.allclose(
ax.coords.get_coord_range(),
np.array(
[(-131.3193386797236, 180.0), (-44.02289164685554, 44.80732766607591)]
),
)
|
84c5d94706ecf6a921a502ed919d0a67f868af5f458eadfd652bcf8f65bfc511 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.tests.figures import figure_test
from astropy.visualization.wcsaxes import WCSAxes
from astropy.visualization.wcsaxes.transforms import CurvedTransform
from astropy.wcs import WCS
from .test_images import BaseImageTests
# Create fake transforms that roughly mimic a polar projection
class DistanceToLonLat(CurvedTransform):
has_inverse = True
def __init__(self, R=6e3):
super().__init__()
self.R = R
def transform(self, xy):
x, y = xy[:, 0], xy[:, 1]
lam = np.degrees(np.arctan2(y, x))
phi = 90.0 - np.degrees(np.hypot(x, y) / self.R)
return np.array((lam, phi)).transpose()
transform_non_affine = transform
def inverted(self):
return LonLatToDistance(R=self.R)
class LonLatToDistance(CurvedTransform):
def __init__(self, R=6e3):
super().__init__()
self.R = R
def transform(self, lamphi):
lam, phi = lamphi[:, 0], lamphi[:, 1]
r = np.radians(90 - phi) * self.R
x = r * np.cos(np.radians(lam))
y = r * np.sin(np.radians(lam))
return np.array((x, y)).transpose()
transform_non_affine = transform
def inverted(self):
return DistanceToLonLat(R=self.R)
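# --- Editor's illustrative sketch (not part of the original test module). ---
# A minimal, hypothetical round-trip check of the fake transforms above; the
# leading underscore keeps pytest from collecting it as a test.
def _demo_fake_transform_roundtrip():
    forward = DistanceToLonLat(R=6e3)
    xy = np.array([[1000.0, 0.0], [0.0, 2000.0]])
    lonlat = forward.transform(xy)
    assert np.allclose(forward.inverted().transform(lonlat), xy, atol=1e-6)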
class TestTransformCoordMeta(BaseImageTests):
@figure_test
def test_coords_overlay(self):
# Set up a simple WCS that maps pixels to non-projected distances
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["x", "y"]
wcs.wcs.cunit = ["km", "km"]
wcs.wcs.crpix = [614.5, 856.5]
wcs.wcs.cdelt = [6.25, 6.25]
wcs.wcs.crval = [0.0, 0.0]
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=wcs)
fig.add_axes(ax)
s = DistanceToLonLat(R=6378.273)
ax.coords["x"].set_ticklabel_position("")
ax.coords["y"].set_ticklabel_position("")
coord_meta = {}
coord_meta["type"] = ("longitude", "latitude")
coord_meta["wrap"] = (360.0 * u.deg, None)
coord_meta["unit"] = (u.deg, u.deg)
coord_meta["name"] = "lon", "lat"
overlay = ax.get_coords_overlay(s, coord_meta=coord_meta)
overlay.grid(color="red")
overlay["lon"].grid(color="red", linestyle="solid", alpha=0.3)
overlay["lat"].grid(color="blue", linestyle="solid", alpha=0.3)
overlay["lon"].set_ticklabel(size=7, exclude_overlapping=True)
overlay["lat"].set_ticklabel(size=7, exclude_overlapping=True)
overlay["lon"].set_ticklabel_position("brtl")
overlay["lat"].set_ticklabel_position("brtl")
overlay["lon"].set_ticks(spacing=10.0 * u.deg)
overlay["lat"].set_ticks(spacing=10.0 * u.deg)
ax.set_xlim(-0.5, 1215.5)
ax.set_ylim(-0.5, 1791.5)
return fig
@figure_test
def test_coords_overlay_auto_coord_meta(self):
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=WCS(self.msx_header))
fig.add_axes(ax)
ax.grid(color="red", alpha=0.5, linestyle="solid")
overlay = ax.get_coords_overlay("fk5") # automatically sets coord_meta
overlay.grid(color="black", alpha=0.5, linestyle="solid")
overlay["ra"].set_ticks(color="black")
overlay["dec"].set_ticks(color="black")
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
@figure_test
def test_direct_init(self):
s = DistanceToLonLat(R=6378.273)
coord_meta = {}
coord_meta["type"] = ("longitude", "latitude")
coord_meta["wrap"] = (360.0 * u.deg, None)
coord_meta["unit"] = (u.deg, u.deg)
coord_meta["name"] = "lon", "lat"
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], transform=s, coord_meta=coord_meta)
fig.add_axes(ax)
ax.coords["lon"].grid(color="red", linestyle="solid", alpha=0.3)
ax.coords["lat"].grid(color="blue", linestyle="solid", alpha=0.3)
ax.coords["lon"].set_auto_axislabel(False)
ax.coords["lat"].set_auto_axislabel(False)
ax.coords["lon"].set_ticklabel(size=7, exclude_overlapping=True)
ax.coords["lat"].set_ticklabel(size=7, exclude_overlapping=True)
ax.coords["lon"].set_ticklabel_position("brtl")
ax.coords["lat"].set_ticklabel_position("brtl")
ax.coords["lon"].set_ticks(spacing=10.0 * u.deg)
ax.coords["lat"].set_ticks(spacing=10.0 * u.deg)
ax.set_xlim(-400.0, 500.0)
ax.set_ylim(-300.0, 400.0)
return fig
|
d4b4650f5e5cf18ac24f47cb448a74fab2c6441c7d77188552ac16993de0aacc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.transforms import Affine2D, IdentityTransform
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.tests.figures import figure_test
from astropy.time import Time
from astropy.units import Quantity
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.frame import RectangularFrame, RectangularFrame1D
from astropy.visualization.wcsaxes.wcsapi import (
WCSWorld2PixelTransform,
apply_slices,
transform_coord_meta_from_wcs,
)
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseLowLevelWCS, SlicedLowLevelWCS
@pytest.fixture
def plt_close():
yield
plt.close("all")
WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ["x", "y"]
WCS2D.wcs.cunit = ["km", "km"]
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0.0, 0.0]
WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ["x", "y", "z"]
WCS3D.wcs.cunit = ["km", "km", "km"]
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0.0, 0.0, 1.0]
@pytest.fixture
def wcs_4d():
header = dedent(
"""\
WCSAXES = 4 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CRPIX3 = 0.0 / Pixel coordinate of reference point
CRPIX4 = 5.0 / Pixel coordinate of reference point
CDELT1 = 0.4 / [min] Coordinate increment at reference point
CDELT2 = 2E-11 / [m] Coordinate increment at reference point
CDELT3 = 0.0027777777777778 / [deg] Coordinate increment at reference point
CDELT4 = 0.0013888888888889 / [deg] Coordinate increment at reference point
CUNIT1 = 'min' / Units of coordinate increment and value
CUNIT2 = 'm' / Units of coordinate increment and value
CUNIT3 = 'deg' / Units of coordinate increment and value
CUNIT4 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'TIME' / Coordinate type code
CTYPE2 = 'WAVE' / Vacuum wavelength (linear)
CTYPE3 = 'HPLT-TAN' / Coordinate type codegnomonic projection
CTYPE4 = 'HPLN-TAN' / Coordinate type codegnomonic projection
CRVAL1 = 0.0 / [min] Coordinate value at reference point
CRVAL2 = 0.0 / [m] Coordinate value at reference point
CRVAL3 = 0.0 / [deg] Coordinate value at reference point
CRVAL4 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
"""
)
return WCS(header=fits.Header.fromstring(header, sep="\n"))
@pytest.fixture
def cube_wcs():
cube_header = get_pkg_data_filename("data/cube_header")
header = fits.Header.fromtextfile(cube_header)
return WCS(header=header)
def test_shorthand_inversion():
"""
Test that the Matplotlib subtraction shorthand for composing and inverting
transformations works.
"""
w1 = WCS(naxis=2)
w1.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w1.wcs.crpix = [256.0, 256.0]
w1.wcs.cdelt = [-0.05, 0.05]
w1.wcs.crval = [120.0, -19.0]
w2 = WCS(naxis=2)
w2.wcs.ctype = ["RA---SIN", "DEC--SIN"]
w2.wcs.crpix = [256.0, 256.0]
w2.wcs.cdelt = [-0.05, 0.05]
w2.wcs.crval = [235.0, +23.7]
t1 = WCSWorld2PixelTransform(w1)
t2 = WCSWorld2PixelTransform(w2)
assert t1 - t2 == t1 + t2.inverted()
assert t1 - t2 != t2.inverted() + t1
assert t1 - t1 == IdentityTransform()
# We add Affine2D because composite transforms in Matplotlib can impose
# stricter dimensionality requirements.
def test_2d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world, world_2)
def test_3d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS3D[:, 0, :]) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world[:, 0], world_2[:, 0])
np.testing.assert_allclose(world[:, 1], world_2[:, 1])
def test_coord_type_from_ctype(cube_wcs):
_, coord_meta = transform_coord_meta_from_wcs(
cube_wcs, RectangularFrame, slices=(50, "y", "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ["l", "r", "b"]
assert ticklabel_position == ["l", "r", "b"]
assert ticks_position == ["l", "r", "b"]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cname = ["Longitude", ""]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["default_axis_label"] == ["Longitude", "pos.galactic.lat"]
assert coord_meta["name"] == [
("pos.galactic.lon", "glon-tan", "glon", "Longitude"),
("pos.galactic.lat", "glat-tan", "glat"),
]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.arcsec, u.arcsec]
assert coord_meta["wrap"] == [180.0 * u.deg, None]
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame, slices=("y", "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes should be swapped because of slices
assert axislabel_position == ["l", "b"]
assert ticklabel_position == ["l", "b"]
assert ticks_position == ["bltr", "bltr"]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HGLN-TAN", "HGLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [180.0 * u.deg, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["CRLN-TAN", "CRLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [360.0 * u.deg, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.hourangle, u.deg]
assert coord_meta["wrap"] == [None, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["spam", "spam"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["scalar", "scalar"]
assert coord_meta["format_unit"] == [u.one, u.one]
assert coord_meta["wrap"] == [None, None]
def test_coord_type_1d_1d_wcs():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.crpix = [256.0]
wcs.wcs.cdelt = [-0.05]
wcs.wcs.crval = [50.0]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D)
assert coord_meta["type"] == ["scalar"]
assert coord_meta["format_unit"] == [u.m]
assert coord_meta["wrap"] == [None]
def test_coord_type_1d_2d_wcs_correlated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame1D, slices=("x", 0)
)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["visible"] == [True, True]
def test_coord_type_1d_2d_wcs_uncorrelated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["WAVE", "UTC"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cunit = ["nm", "s"]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame1D, slices=("x", 0)
)
assert coord_meta["type"] == ["scalar", "scalar"]
assert coord_meta["format_unit"] == [u.m, u.s]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["visible"] == [True, False]
def test_coord_meta_4d(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(
wcs_4d, RectangularFrame, slices=(0, 0, "x", "y")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
assert axislabel_position == ["", "", "b", "l"]
assert ticklabel_position == ["", "", "b", "l"]
assert ticks_position == ["", "", "bltr", "bltr"]
def test_coord_meta_4d_line_plot(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(
wcs_4d, RectangularFrame1D, slices=(0, 0, 0, "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ["", "", "t", "b"]
assert ticklabel_position == ["", "", "t", "b"]
assert ticks_position == ["", "", "t", "b"]
@pytest.fixture
def sub_wcs(wcs_4d, wcs_slice):
return SlicedLowLevelWCS(wcs_4d, wcs_slice)
@pytest.mark.parametrize(
("wcs_slice", "wcsaxes_slices", "world_map", "ndim"),
[
(np.s_[...], [0, 0, "x", "y"], (2, 3), 2),
(np.s_[...], [0, "x", 0, "y"], (1, 2, 3), 3),
(np.s_[...], ["x", 0, 0, "y"], (0, 2, 3), 3),
(np.s_[...], ["x", "y", 0, 0], (0, 1), 2),
(np.s_[:, :, 0, :], [0, "x", "y"], (1, 2), 2),
(np.s_[:, :, 0, :], ["x", 0, "y"], (0, 1, 2), 3),
(np.s_[:, :, 0, :], ["x", "y", 0], (0, 1, 2), 3),
(np.s_[:, 0, :, :], ["x", "y", 0], (0, 1), 2),
],
)
def test_apply_slices(sub_wcs, wcs_slice, wcsaxes_slices, world_map, ndim):
transform_wcs, _, out_world_map = apply_slices(sub_wcs, wcsaxes_slices)
assert transform_wcs.world_n_dim == ndim
assert out_world_map == world_map
# parametrize here to pass to the fixture
@pytest.mark.parametrize("wcs_slice", [np.s_[:, :, 0, :]])
def test_sliced_ND_input(wcs_4d, sub_wcs, wcs_slice, plt_close):
slices_wcsaxes = [0, "x", "y"]
for sub_wcs in (sub_wcs, SlicedLowLevelWCS(wcs_4d, wcs_slice)):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
_, coord_meta = transform_coord_meta_from_wcs(
sub_wcs, RectangularFrame, slices=slices_wcsaxes
)
assert all(len(x) == 3 for x in coord_meta.values())
assert coord_meta["name"] == [
"time",
("custom:pos.helioprojective.lat", "hplt-tan", "hplt"),
("custom:pos.helioprojective.lon", "hpln-tan", "hpln"),
]
assert coord_meta["type"] == ["scalar", "latitude", "longitude"]
assert coord_meta["wrap"] == [None, None, 180.0 * u.deg]
assert coord_meta["unit"] == [u.Unit("min"), u.Unit("deg"), u.Unit("deg")]
assert coord_meta["visible"] == [False, True, True]
assert coord_meta["format_unit"] == [
u.Unit("min"),
u.Unit("arcsec"),
u.Unit("arcsec"),
]
assert coord_meta["default_axislabel_position"] == ["", "b", "l"]
assert coord_meta["default_ticklabel_position"] == ["", "b", "l"]
assert coord_meta["default_ticks_position"] == ["", "bltr", "bltr"]
# Validate the axes initialize correctly
plt.clf()
plt.subplot(projection=sub_wcs, slices=slices_wcsaxes)
class LowLevelWCS5D(BaseLowLevelWCS):
pixel_dim = 2
@property
def pixel_n_dim(self):
return self.pixel_dim
@property
def world_n_dim(self):
return 5
@property
def world_axis_physical_types(self):
return [
"em.freq",
"time",
"pos.eq.ra",
"pos.eq.dec",
"phys.polarization.stokes",
]
@property
def world_axis_units(self):
return ["Hz", "day", "deg", "deg", ""]
@property
def world_axis_names(self):
return ["Frequency", "", "RA", "DEC", ""]
def pixel_to_world_values(self, *pixel_arrays):
pixel_arrays = (list(pixel_arrays) * 3)[:-1] # make list have 5 elements
return [
np.asarray(pix) * scale
for pix, scale in zip(pixel_arrays, [10, 0.2, 0.4, 0.39, 2])
]
def world_to_pixel_values(self, *world_arrays):
world_arrays = world_arrays[:2] # make list have 2 elements
return [
np.asarray(world) / scale for world, scale in zip(world_arrays, [10, 0.2])
]
@property
def world_axis_object_components(self):
return [
("freq", 0, "value"),
("time", 0, "mjd"),
("celestial", 0, "spherical.lon.degree"),
("celestial", 1, "spherical.lat.degree"),
("stokes", 0, "value"),
]
@property
def world_axis_object_classes(self):
return {
"celestial": (SkyCoord, (), {"unit": "deg"}),
"time": (Time, (), {"format": "mjd"}),
"freq": (Quantity, (), {"unit": "Hz"}),
"stokes": (Quantity, (), {"unit": "one"}),
}
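# --- Editor's illustrative sketch (not part of the original test module). ---
# A minimal, hypothetical check of how the dummy 5-D WCS above recycles its two
# pixel axes as (x, y, x, y, x) and applies fixed scales; the leading
# underscore keeps pytest from collecting it as a test.
def _demo_lowlevelwcs5d():
    wcs = LowLevelWCS5D()
    world = wcs.pixel_to_world_values([1.0, 2.0], [3.0, 4.0])
    assert np.allclose(world[0], [10.0, 20.0])  # x scaled by 10 (frequency)
    assert np.allclose(world[1], [0.6, 0.8])    # y scaled by 0.2 (time)
    assert np.allclose(world[4], [2.0, 4.0])    # x scaled by 2 (Stokes)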
def test_edge_axes():
    # Check that axes on the edge of a spherical projection are shown properly
# (see https://github.com/astropy/astropy/issues/10441)
shape = [180, 360]
data = np.random.rand(*shape)
header = {
"wcsaxes": 2,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": 1.0,
"cdelt2": 1.0,
"cunit1": "deg",
"cunit2": "deg",
"ctype1": "CRLN-CAR",
"ctype2": "CRLT-CAR",
"crval1": 0.0,
"crval2": 0.0,
"lonpole": 0.0,
"latpole": 90.0,
}
wcs = WCS(header)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=wcs)
ax.imshow(data, origin="lower")
# By default the x- and y- axes should be drawn
lon = ax.coords[0]
lat = ax.coords[1]
fig.canvas.draw()
np.testing.assert_equal(
lon.ticks.world["b"], np.array([90.0, 180.0, 180.0, 270.0, 0.0])
)
np.testing.assert_equal(
lat.ticks.world["l"], np.array([-90.0, -60.0, -30.0, 0.0, 30.0, 60.0, 90.0])
)
def test_coord_meta_wcsapi():
wcs = LowLevelWCS5D()
wcs.pixel_dim = 5
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame, slices=[0, 0, "x", "y", 0]
)
assert coord_meta["name"] == [
("em.freq", "Frequency"),
"time",
("pos.eq.ra", "RA"),
("pos.eq.dec", "DEC"),
"phys.polarization.stokes",
]
assert coord_meta["type"] == ["scalar", "scalar", "longitude", "latitude", "scalar"]
assert coord_meta["wrap"] == [None, None, None, None, None]
assert coord_meta["unit"] == [
u.Unit("Hz"),
u.Unit("d"),
u.Unit("deg"),
u.Unit("deg"),
u.one,
]
assert coord_meta["visible"] == [True, True, True, True, True]
assert coord_meta["format_unit"] == [
u.Unit("Hz"),
u.Unit("d"),
u.Unit("hourangle"),
u.Unit("deg"),
u.one,
]
assert coord_meta["default_axislabel_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_ticklabel_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_ticks_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_axis_label"] == [
"Frequency",
"time",
"RA",
"DEC",
"phys.polarization.stokes",
]
@figure_test
def test_wcsapi_5d_with_names(plt_close):
# Test that plotting with a 5D low-level WCS that provides world axis names works
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=LowLevelWCS5D())
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.lines
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib import rc_context
from matplotlib.figure import Figure
from matplotlib.patches import Circle, Rectangle
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.tests.figures import figure_test
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.visualization.wcsaxes import WCSAxes, add_beam, add_scalebar
from astropy.visualization.wcsaxes.frame import EllipticalFrame
from astropy.visualization.wcsaxes.patches import Quadrangle, SphericalCircle
from astropy.wcs import WCS
class BaseImageTests:
@classmethod
def setup_class(cls):
msx_header = get_pkg_data_filename("data/msx_header")
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = get_pkg_data_filename("data/rosat_header")
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = get_pkg_data_filename("data/cube_header")
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = get_pkg_data_filename("data/slice_header")
cls.slice_header = fits.Header.fromtextfile(slice_header)
def teardown_method(self, method):
plt.close("all")
class TestBasic(BaseImageTests):
@figure_test
def test_tight_layout(self):
# Check that tight_layout works on a WCSAxes.
fig = plt.figure(figsize=(8, 6))
for i in (1, 2):
fig.add_subplot(2, 1, i, projection=WCS(self.msx_header))
fig.tight_layout()
return fig
@figure_test
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
return fig
@figure_test
def test_axes_off(self):
# Test for turning the axes off
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header))
ax.imshow(np.arange(12).reshape((3, 4)))
ax.set_axis_off()
return fig
@figure_test
@pytest.mark.parametrize("axisbelow", [True, False, "line"])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
ax.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30.0, 50.0), 60.0, 50.0, facecolor="green", edgecolor="red")
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@figure_test
def test_contour_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(
data,
transform=ax.get_transform(wcs_msx),
colors="orange",
levels=[2.5e-5, 5e-5, 1.0e-4],
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_contourf_overlay(self):
# Test for overlaying filled contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contourf(
data, transform=ax.get_transform(wcs_msx), levels=[2.5e-5, 5e-5, 1.0e-4]
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.25, 0.25, 0.65, 0.65], projection=WCS(self.msx_header), aspect="equal"
)
# Change the format of the ticks
ax.coords[0].set_major_formatter("dd:mm:ss")
ax.coords[1].set_major_formatter("dd:mm:ss.ssss")
# Overlay grid on image
ax.grid(color="red", alpha=1.0, lw=1, linestyle="dashed")
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords["glon"].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords["glat"].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords["glon"].set_axislabel("Galactic Longitude", minpad=1.6)
ax.coords["glat"].set_axislabel("Galactic Latitude", minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color("red")
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == "red"
assert ax.coords.frame.get_linewidth() == 2
return fig
@figure_test
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.rosat_header), aspect="equal"
)
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color="black", alpha=1.0, lw=1, linestyle="dashed")
p = Circle((300, 100), radius=40, ec="yellow", fc="none")
ax.add_patch(p)
p = Circle(
(30.0, 20.0),
radius=20.0,
ec="orange",
fc="none",
transform=ax.get_transform("world"),
)
ax.add_patch(p)
p = Circle(
(60.0, 50.0),
radius=20.0,
ec="red",
fc="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(p)
p = Circle(
(40.0, 60.0),
radius=20.0,
ec="green",
fc="none",
transform=ax.get_transform("galactic"),
)
ax.add_patch(p)
return fig
@figure_test
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel("Velocity m/s")
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[0].grid(grid_type="contours", color="purple", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="orange", linestyle="solid")
ax.coords[2].grid(grid_type="contours", color="red", linestyle="solid")
return fig
@figure_test
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=("x", "y", 50),
aspect="equal",
)
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type="contours", color="blue", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="red", linestyle="solid")
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
lines = ax.plot_coord(c, "o")
# Test that plot_coord returns the results from ax.plot
assert isinstance(lines, list)
assert isinstance(lines[0], matplotlib.lines.Line2D)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_scatter_coord(self):
from matplotlib.collections import PathCollection
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
sc = ax.scatter_coord(c, marker="o")
# Test that scatter_coord returns the results from ax.scatter
assert isinstance(sc, PathCollection)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_major_formatter("x.xx")
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel("Velocity km/s")
ax.coords[1].set_ticks(width=1)
ax.coords[2].set_ticks(width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@figure_test
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color="blue", alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color="red", alpha=0.9, width=1)
ax.coords[0].set_ticks_position("all")
ax.coords[1].set_ticks_position("all")
ax.coords[0].set_axislabel("X-axis", size=20)
ax.coords[1].set_axislabel(
"Y-axis",
color="green",
size=25,
weight="regular",
style="normal",
family="cmtt10",
)
ax.coords[0].set_axislabel_position("t")
ax.coords[1].set_axislabel_position("r")
ax.coords[0].set_ticklabel(
color="purple",
size=15,
alpha=1,
weight="light",
style="normal",
family="cmss10",
)
ax.coords[1].set_ticklabel(
color="black", size=18, alpha=0.9, weight="bold", family="cmr10"
)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("r")
return fig
@figure_test
def test_rcparams(self):
# Test custom rcParams
with rc_context(
{
"axes.labelcolor": "purple",
"axes.labelsize": 14,
"axes.labelweight": "bold",
"axes.linewidth": 3,
"axes.facecolor": "0.5",
"axes.edgecolor": "green",
"xtick.color": "red",
"xtick.labelsize": 8,
"xtick.direction": "in",
"xtick.minor.visible": True,
"xtick.minor.size": 5,
"xtick.major.size": 20,
"xtick.major.width": 3,
"xtick.major.pad": 10,
"grid.color": "blue",
"grid.linestyle": ":",
"grid.linewidth": 1,
"grid.alpha": 0.5,
}
):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.15, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.set_xlabel("X label")
ax.set_ylabel("Y label")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.2, 0.2, 0.6, 0.6], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type("scalar")
ax.coords[1].set_coord_type("scalar")
ax.coords[0].set_major_formatter("x.xxx")
ax.coords[1].set_major_formatter("x.xxx")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("all")
return fig
@figure_test
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule("always")
ax.coords[1].ticklabels.set_visible(False)
return fig
@figure_test(savefig_kwargs={"bbox_inches": "tight"})
def test_noncelestial_angular(self, tmp_path):
# Regression test for a bug that occurred when passing a WCS with angular
# axes and using set_coord_type to set the coordinates to longitude/latitude,
# but where the WCS wasn't recognized as celestial: the WCS units are not
# converted to deg, so we can't assume that the transform will always return
# degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["solar-x", "solar-y"]
wcs.wcs.cunit = ["arcsec", "arcsec"]
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin="lower")
ax.coords[0].set_coord_type("longitude", coord_wrap=180 * u.deg)
ax.coords[1].set_coord_type("latitude")
ax.coords[0].set_major_formatter("s.s")
ax.coords[1].set_major_formatter("s.s")
ax.coords[0].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.coords[1].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.grid(color="white", ls="solid")
# Force drawing (needed for format_coord)
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(512, 512) == "513.0 513.0 (world)"
return fig
@figure_test
def test_patches_distortion(self, tmp_path):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
# Pixel coordinates
r = Rectangle((30.0, 50.0), 60.0, 50.0, edgecolor="green", facecolor="none")
ax.add_patch(r)
# FK5 coordinates
r = Rectangle(
(266.4, -28.9),
0.3,
0.3,
edgecolor="cyan",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r)
# FK5 coordinates
c = Circle(
(266.4, -29.1),
0.15,
edgecolor="magenta",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(c)
# Pixel coordinates
ax.scatter(
[40, 100, 130],
[30, 130, 60],
s=100,
edgecolor="red",
facecolor=(1, 0, 0, 0.5),
)
# World coordinates (should not be distorted)
ax.scatter(
266.78238,
-28.769255,
transform=ax.get_transform("fk5"),
s=300,
edgecolor="red",
facecolor="none",
)
# World coordinates (should not be distorted)
r1 = SphericalCircle(
(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r1)
r2 = SphericalCircle(
SkyCoord(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
with pytest.warns(
AstropyUserWarning,
match="Received `center` of representation type "
"<class 'astropy.coordinates.representation.CartesianRepresentation'> "
"will be converted to SphericalRepresentation",
):
r3 = SphericalCircle(
SkyCoord(
x=-0.05486461,
y=-0.87204803,
z=-0.48633538,
representation_type="cartesian",
),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
# Test to verify that SphericalCircle works irrespective of whether
# the input (center) is a tuple or a SkyCoord object.
assert (r1.get_xy() == r2.get_xy()).all()
assert np.allclose(r1.get_xy(), r3.get_xy())
assert np.allclose(r2.get_xy()[0], [266.4, -29.25])
return fig
@figure_test
def test_quadrangle(self, tmp_path):
# Test that Quadrangle can have curved edges while Rectangle does not
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
ax.set_xlim(0, 10000)
ax.set_ylim(-10000, 0)
# Add a quadrangle patch (100 degrees by 20 degrees)
q = Quadrangle(
(255, -70) * u.deg,
100 * u.deg,
20 * u.deg,
label="Quadrangle",
edgecolor="blue",
facecolor="none",
transform=ax.get_transform("icrs"),
)
ax.add_patch(q)
# Add a rectangle patch (100 degrees by 20 degrees)
r = Rectangle(
(255, -70),
100,
20,
label="Rectangle",
edgecolor="red",
facecolor="none",
linestyle="--",
transform=ax.get_transform("icrs"),
)
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@figure_test
def test_beam_shape_from_args(self, tmp_path):
# Test for adding the beam shape with the beam parameters as arguments
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(
ax,
major=2 * u.arcmin,
minor=1 * u.arcmin,
angle=-30 * u.degree,
corner="bottom right",
frame=True,
borderpad=0.0,
pad=1.0,
color="black",
)
return fig
@figure_test
def test_beam_shape_from_header(self, tmp_path):
# Test for adding the beam shape with the beam parameters from a header
hdr = self.msx_header
hdr["BMAJ"] = (2 * u.arcmin).to(u.degree).value
hdr["BMIN"] = (1 * u.arcmin).to(u.degree).value
hdr["BPA"] = 30.0
wcs = WCS(hdr)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(ax, header=hdr)
return fig
@figure_test
def test_scalebar(self, tmp_path):
# Test for adding a scale bar
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_scalebar(
ax,
2 * u.arcmin,
label="2'",
corner="top right",
borderpad=1.0,
label_top=True,
)
return fig
@figure_test
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@figure_test
def test_hms_labels(self):
# This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@figure_test(style={"text.usetex": True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@figure_test
def test_tick_params(self):
# This is a test to make sure that tick_params works correctly. We try
# and test as much as possible with a single reference image.
wcs = WCS()
wcs.wcs.ctype = ["lon", "lat"]
fig = plt.figure(figsize=(6, 6))
# The first subplot tests:
# - that plt.tick_params works
# - that by default both axes are changed
# - changing the tick direction and appearance, the label appearance and padding
ax = fig.add_subplot(2, 2, 1, projection=wcs)
plt.tick_params(
direction="in",
length=20,
width=5,
pad=6,
labelsize=6,
color="red",
labelcolor="blue",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The second subplot tests:
# - that specifying grid parameters doesn't actually cause the grid to
# be shown (as expected)
# - that axis= can be given integer coordinates or their string name
# - that the tick positioning works (bottom/left/top/right)
# Make sure that we can pass things that can index coords
ax = fig.add_subplot(2, 2, 2, projection=wcs)
plt.tick_params(
axis=0,
direction="in",
length=20,
width=5,
pad=4,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
plt.tick_params(
axis="lat",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The third subplot tests:
# - that ax.tick_params works
# - that the grid has the correct settings once shown explicitly
# - that we can use axis='x' and axis='y'
ax = fig.add_subplot(2, 2, 3, projection=wcs)
ax.tick_params(
axis="x",
direction="in",
length=20,
width=5,
pad=20,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
ax.tick_params(
axis="y",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
plt.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The final subplot tests:
# - that we can use tick_params on a specific coordinate
# - that the label positioning can be customized
# - that the colors argument works
# - that which='minor' works
ax = fig.add_subplot(2, 2, 4, projection=wcs)
ax.coords[0].tick_params(
length=4,
pad=2,
colors="orange",
labelbottom=True,
labeltop=True,
labelsize=10,
)
ax.coords[1].display_minor_ticks(True)
ax.coords[1].tick_params(which="minor", length=6)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
@pytest.fixture
def wave_wcs_1d():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.cunit = ["m"]
wcs.wcs.crpix = [1]
wcs.wcs.cdelt = [5]
wcs.wcs.crval = [45]
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_1d_wcs(wave_wcs_1d):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.set_xlabel("this is the x-axis")
ax.set_ylabel("this is the y-axis")
return fig
@figure_test
def test_1d_plot_1d_wcs_format_unit(wave_wcs_1d):
"""
This test ensures that the format unit is updated and displayed for both
the axis ticks and default axis labels.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.coords[0].set_format_unit("nm")
return fig
@pytest.fixture
def spatial_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [15] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_2d_wcs_correlated(spatial_wcs_2d):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d, slices=("x", 0))
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
ax.coords["glon"].set_ticks(color="red")
ax.coords["glon"].set_ticklabel(color="red")
ax.coords["glon"].grid(color="red")
ax.coords["glat"].set_ticks(color="blue")
ax.coords["glat"].set_ticklabel(color="blue")
ax.coords["glat"].grid(color="blue")
return fig
@pytest.fixture
def spatial_wcs_2d_small_angle():
"""
This WCS has an almost linear correlation between the pixel and world axes
close to the reference pixel.
"""
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [10 / 3600, 5 / 3600]
wcs.wcs.crval = [0] * 2
wcs.wcs.set()
return wcs
@pytest.mark.parametrize(
"slices, bottom_axis",
[
# Remember SLLWCS takes slices in array order
(np.s_[0, :], "custom:pos.helioprojective.lon"),
(np.s_[:, 0], "custom:pos.helioprojective.lat"),
],
)
@figure_test
def test_1d_plot_1d_sliced_low_level_wcs(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
Test that a SlicedLowLevelWCS (SLLWCS) through a coupled 2D WCS plots as a line correctly.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle[slices])
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
plt.draw()
assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
return fig
@pytest.mark.parametrize(
"slices, bottom_axis", [(("x", 0), "hpln"), ((0, "x"), "hplt")]
)
@figure_test
def test_1d_plot_put_varying_axis_on_bottom_lon(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
When we plot a 1D slice through spatial axes, we want to put the axis which
actually changes on the bottom.
For example, with an aligned wcs and pixel grid, if you plot a lon slice
through a lat axis you would end up with no ticks on the bottom (since lon
doesn't change) and a set of lat ticks on the top (since lat does change),
but that is the correlated axis, not the one you are actually plotting
against.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle, slices=slices)
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
plt.draw()
assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
return fig
@figure_test
def test_allsky_labels_wrap():
# Regression test for a bug that caused some tick labels to not be shown
# when looking at all-sky maps in the case where coord_wrap < 360
fig = plt.figure(figsize=(4, 4))
icen = 0
for ctype in [("GLON-CAR", "GLAT-CAR"), ("HGLN-CAR", "HGLT-CAR")]:
for cen in [0, 90, 180, 270]:
icen += 1
wcs = WCS(naxis=2)
wcs.wcs.ctype = ctype
wcs.wcs.crval = cen, 0
wcs.wcs.crpix = 360.5, 180.5
wcs.wcs.cdelt = -0.5, 0.5
ax = fig.add_subplot(8, 1, icen, projection=wcs)
ax.set_xlim(-0.5, 719.5)
ax.coords[0].set_ticks(spacing=50 * u.deg)
ax.coords[0].set_ticks_position("b")
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
ax.coords[1].set_ticklabel_visible(False)
ax.coords[1].set_ticks_visible(False)
fig.subplots_adjust(hspace=2, left=0.05, right=0.95, bottom=0.1, top=0.95)
return fig
@figure_test
def test_tickable_gridlines():
wcs = WCS(
{
"naxis": 2,
"naxis1": 360,
"naxis2": 180,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": -1,
"cdelt2": 1,
"ctype1": "RA---CAR",
"ctype2": "DEC--CAR",
}
)
fig = Figure()
ax = fig.add_subplot(projection=wcs)
ax.set_xlim(-0.5, 360 - 0.5)
ax.set_ylim(-0.5, 150 - 0.5)
lon, lat = ax.coords
lon.grid()
lat.grid()
overlay = ax.get_coords_overlay("galactic")
overlay[0].set_ticks(spacing=30 * u.deg)
overlay[1].set_ticks(spacing=30 * u.deg)
# Test both single-character and multi-character names
overlay[1].add_tickable_gridline("g", -30 * u.deg)
overlay[0].add_tickable_gridline("const-glon", 30 * u.deg)
overlay[0].grid(color="magenta")
overlay[0].set_ticklabel_position("gt")
overlay[0].set_ticklabel(color="magenta")
overlay[0].set_axislabel("Galactic longitude", color="magenta")
overlay[1].grid(color="blue")
overlay[1].set_ticklabel_position(("const-glon", "r"))
overlay[1].set_ticklabel(color="blue")
overlay[1].set_axislabel("Galactic latitude", color="blue")
return fig
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Slicing mixin to the NDData class.
from astropy import log
from astropy.wcs.wcsapi import BaseHighLevelWCS # noqa: F401
from astropy.wcs.wcsapi import BaseLowLevelWCS # noqa: F401
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS
__all__ = ["NDSlicingMixin"]
class NDSlicingMixin:
"""Mixin to provide slicing on objects using the `NDData`
interface.
The ``data``, ``mask``, ``uncertainty`` and ``wcs`` will be sliced, if
set and sliceable. The ``unit`` and ``meta`` will be untouched. The return
will be a reference and not a copy, if possible.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDSlicingMixin
>>> class NDDataSliceable(NDSlicingMixin, NDData):
... pass
Slicing an instance containing data::
>>> nd = NDDataSliceable([1,2,3,4,5])
>>> nd[1:3]
NDDataSliceable([2, 3])
The other attributes are also sliced, for example the ``mask``::
>>> import numpy as np
>>> mask = np.array([True, False, True, True, False])
>>> nd2 = NDDataSliceable(nd, mask=mask)
>>> nd2slc = nd2[1:3]
>>> nd2slc[nd2slc.mask]
NDDataSliceable([3])
Be aware that changing values of the sliced instance will change the values
of the original::
>>> nd3 = nd2[1:3]
>>> nd3.data[0] = 100
>>> nd2
NDDataSliceable([ 1, 100, 3, 4, 5])
See also
--------
NDDataRef
NDDataArray
"""
def __getitem__(self, item):
# Abort slicing if the data is a single scalar.
if self.data.shape == ():
raise TypeError("scalars cannot be sliced.")
# Let the other methods handle slicing.
kwargs = self._slice(item)
return self.__class__(**kwargs)
def _slice(self, item):
"""Collects the sliced attributes and passes them back as `dict`.
It passes uncertainty, mask and wcs to their appropriate ``_slice_*``
method, while ``meta`` and ``unit`` are simply taken from the original.
The data is assumed to be sliceable and is sliced directly.
When possible the return should *not* be a copy of the data but a
reference.
Parameters
----------
item : slice
The slice passed to ``__getitem__``.
Returns
-------
dict :
Containing all the attributes after slicing - ready to
use them to create ``self.__class__.__init__(**kwargs)`` in
``__getitem__``.
"""
kwargs = {}
kwargs["data"] = self.data[item]
# Try to slice some attributes
kwargs["uncertainty"] = self._slice_uncertainty(item)
kwargs["mask"] = self._slice_mask(item)
kwargs["wcs"] = self._slice_wcs(item)
# Attributes which are copied and not intended to be sliced
kwargs["unit"] = self.unit
kwargs["meta"] = self.meta
return kwargs
def _slice_uncertainty(self, item):
if self.uncertainty is None:
return None
try:
return self.uncertainty[item]
except TypeError:
# Catching TypeError in case the object has no __getitem__ method.
# But let IndexError raise.
log.info("uncertainty cannot be sliced.")
return self.uncertainty
def _slice_mask(self, item):
if self.mask is None:
return None
try:
return self.mask[item]
except TypeError:
log.info("mask cannot be sliced.")
return self.mask
def _slice_wcs(self, item):
if self.wcs is None:
return None
try:
llwcs = SlicedLowLevelWCS(self.wcs.low_level_wcs, item)
return HighLevelWCSWrapper(llwcs)
except Exception as err:
self._handle_wcs_slicing_error(err, item)
# Implement this in a method to allow subclasses to customise the error.
def _handle_wcs_slicing_error(self, err, item):
raise ValueError(
f"Slicing the WCS object with the slice '{item}' "
"failed, if you want to slice the NDData object without the WCS, you "
"can remove by setting `NDData.wcs = None` and then retry."
) from err
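# A minimal sketch (assuming ``NDDataRef``, which includes this mixin, and a
# plain 2D ``astropy.wcs.WCS``) of the WCS handling implemented above: slicing
# the object slices the WCS as well, and the result is a
# ``HighLevelWCSWrapper`` around a ``SlicedLowLevelWCS``.
def _example_slice_with_wcs():
    import numpy as np

    from astropy.nddata import NDDataRef
    from astropy.wcs import WCS

    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    nd = NDDataRef(np.arange(100.0).reshape(10, 10), wcs=wcs)
    sub = nd[2:5, 3:7]  # data, mask, uncertainty and wcs are sliced together
    return sub.wcs  # HighLevelWCSWrapper wrapping a SlicedLowLevelWCS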
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Arithmetic mixin to the NDData class.
import warnings
from copy import deepcopy
import numpy as np
from astropy.nddata.nduncertainty import NDUncertainty
from astropy.units import dimensionless_unscaled
from astropy.utils import format_doc, sharedmethod
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["NDArithmeticMixin"]
# Global so it doesn't pollute the class dict unnecessarily:
# Docstring templates for add, subtract, multiply, divide methods.
_arit_doc = """
Performs {name} by evaluating ``self`` {op} ``operand``.
Parameters
----------
operand, operand2 : `NDData`-like instance
If ``operand2`` is ``None`` or not given it will perform the operation
``self`` {op} ``operand``.
If ``operand2`` is given it will perform ``operand`` {op} ``operand2``.
If the method was called on a class rather than on the instance
``operand2`` must be given.
propagate_uncertainties : `bool` or ``None``, optional
If ``None`` the result will have no uncertainty. If ``False`` the
result will have a copied version of the first operand that has an
uncertainty. If ``True`` the result will have a correctly propagated
uncertainty from the uncertainties of the operands but this assumes
that the uncertainties are `NDUncertainty`-like. Default is ``True``.
.. versionchanged:: 1.2
This parameter must be given as keyword-parameter. Using it as
positional parameter is deprecated.
``None`` was added as valid parameter value.
handle_mask : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no mask. If ``'first_found'`` the
result will have a copied version of the first operand that has a
mask. If it is a callable then the specified callable must
create the result's ``mask`` and if necessary provide a copy.
Default is `numpy.logical_or`.
.. versionadded:: 1.2
handle_meta : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no meta. If ``'first_found'`` the
result will have a copied version of the first operand that has a
(not empty) meta. If it is a callable then the specified callable must
create the result's ``meta`` and if necessary provide a copy.
Default is ``None``.
.. versionadded:: 1.2
compare_wcs : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no wcs and no comparison between
the wcs of the operands is made. If ``'first_found'`` the
result will have a copied version of the first operand that has a
wcs. If it is a callable then the specified callable must
compare the ``wcs``. If the comparison succeeds, the resulting ``wcs`` is
a copy of the wcs of the first operand; otherwise a ``ValueError`` is
raised. Default is ``'first_found'``.
.. versionadded:: 1.2
uncertainty_correlation : number or `~numpy.ndarray`, optional
The correlation between the two operands is used for correct error
propagation for correlated data as given in:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
Default is 0.
.. versionadded:: 1.2
kwargs :
Any other parameter that should be passed to the callables used.
Returns
-------
result : `~astropy.nddata.NDData`-like
The resulting dataset
Notes
-----
If a ``callable`` is used for ``mask``, ``wcs`` or ``meta`` the
callable must accept the corresponding attributes as its first two
parameters. If the callable also needs additional parameters these can be
defined as ``kwargs`` and must start with ``"mask_"`` (for a mask callable),
``"wcs_"`` (for a wcs callable) or ``"meta_"`` (for a meta callable). This
prefix is removed before the callable is called.
``"first_found"`` can also be abbreviated with ``"ff"``.
"""
class NDArithmeticMixin:
"""
Mixin class to add arithmetic to an NDData object.
When subclassing, be sure to list the superclasses in the correct order
so that the subclass sees NDData as the main superclass. See
`~astropy.nddata.NDDataArray` for an example.
Notes
-----
This class only aims at covering the most common cases so there are certain
restrictions on the saved attributes::
- ``uncertainty`` : has to be something that has a `NDUncertainty`-like
interface for uncertainty propagation
- ``mask`` : has to be something that can be combined with a logical ``or``
operation (the default ``handle_mask`` is `numpy.logical_or`).
- ``wcs`` : has to implement a way of comparing with ``==`` to allow
the operation.
But there is a workaround that allows disabling the handling of a specific
attribute and instead simply setting the result's attribute to ``None`` or
copying the existing attribute (and neglecting the other).
For example for uncertainties not representing an `NDUncertainty`-like
interface you can alter the ``propagate_uncertainties`` parameter in
:meth:`NDArithmeticMixin.add`. ``None`` means that the result will have no
uncertainty, ``False`` means it takes the uncertainty of the first operand
(if this does not exist from the second operand) as the result's
uncertainty. This behavior is also explained in the docstring for the
different arithmetic operations.
Decomposing the units is not attempted, mainly due to the internal mechanics
of `~astropy.units.Quantity`, so the resulting data might have units like
``km/m`` if you divided, for example, 100 km by 5 m. This Mixin simply adopts
that behavior.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDArithmeticMixin
>>> class NDDataWithMath(NDArithmeticMixin, NDData):
... pass
Using it with one operand on an instance::
>>> ndd = NDDataWithMath(100)
>>> ndd.add(20)
NDDataWithMath(120)
Using it with two operand on an instance::
>>> ndd = NDDataWithMath(-4)
>>> ndd.divide(1, ndd)
NDDataWithMath(-0.25)
Using it as classmethod requires two operands::
>>> NDDataWithMath.subtract(5, 4)
NDDataWithMath(1)
"""
def _arithmetic(
self,
operation,
operand,
propagate_uncertainties=True,
handle_mask=np.logical_or,
handle_meta=None,
uncertainty_correlation=0,
compare_wcs="first_found",
**kwds,
):
"""
Base method which calculates the result of the arithmetic operation.
This method determines the result of the arithmetic operation on the
``data`` including their units and then forwards to other methods
to calculate the other properties for the result (like uncertainty).
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide`.
operand : same type (class) as self
see :meth:`NDArithmeticMixin.add`
propagate_uncertainties : `bool` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_mask : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_meta : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
compare_wcs : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
uncertainty_correlation : ``Number`` or `~numpy.ndarray`, optional
see :meth:`NDArithmeticMixin.add`
kwargs :
Any other parameter that should be passed to the
different :meth:`NDArithmeticMixin._arithmetic_mask` (or wcs, ...)
methods.
Returns
-------
result : ndarray or `~astropy.units.Quantity`
The resulting data as array (in case both operands were without
unit) or as quantity if at least one had a unit.
kwargs : `dict`
The kwargs should contain all the other attributes (besides data
and unit) needed to create a new instance for the result. Creating
the new instance is up to the calling method, for example
:meth:`NDArithmeticMixin.add`.
"""
# Find the appropriate keywords for the appropriate method (not sure
# if data and uncertainty are ever used ...)
kwds2 = {"mask": {}, "meta": {}, "wcs": {}, "data": {}, "uncertainty": {}}
for i in kwds:
splitted = i.split("_", 1)
try:
kwds2[splitted[0]][splitted[1]] = kwds[i]
except KeyError:
raise KeyError(f"Unknown prefix {splitted[0]} for parameter {i}")
kwargs = {}
# First check that the WCS allows the arithmetic operation
if compare_wcs is None:
kwargs["wcs"] = None
elif compare_wcs in ["ff", "first_found"]:
if self.wcs is None:
kwargs["wcs"] = deepcopy(operand.wcs)
else:
kwargs["wcs"] = deepcopy(self.wcs)
else:
kwargs["wcs"] = self._arithmetic_wcs(
operation, operand, compare_wcs, **kwds2["wcs"]
)
# Then calculate the resulting data (which can, but does not need to, be a
# quantity)
result = self._arithmetic_data(operation, operand, **kwds2["data"])
# Determine the other properties
if propagate_uncertainties is None:
kwargs["uncertainty"] = None
elif not propagate_uncertainties:
if self.uncertainty is None:
kwargs["uncertainty"] = deepcopy(operand.uncertainty)
else:
kwargs["uncertainty"] = deepcopy(self.uncertainty)
else:
kwargs["uncertainty"] = self._arithmetic_uncertainty(
operation,
operand,
result,
uncertainty_correlation,
**kwds2["uncertainty"],
)
# The psf attribute is not propagated; warn if either operand has one set.
if self.psf is not None or operand.psf is not None:
warnings.warn(
f"Not setting psf attribute during {operation.__name__}.",
AstropyUserWarning,
)
if handle_mask is None:
kwargs["mask"] = None
elif handle_mask in ["ff", "first_found"]:
if self.mask is None:
kwargs["mask"] = deepcopy(operand.mask)
else:
kwargs["mask"] = deepcopy(self.mask)
else:
kwargs["mask"] = self._arithmetic_mask(
operation, operand, handle_mask, **kwds2["mask"]
)
if handle_meta is None:
kwargs["meta"] = None
elif handle_meta in ["ff", "first_found"]:
if not self.meta:
kwargs["meta"] = deepcopy(operand.meta)
else:
kwargs["meta"] = deepcopy(self.meta)
else:
kwargs["meta"] = self._arithmetic_meta(
operation, operand, handle_meta, **kwds2["meta"]
)
# Wrap the individual results into a new instance of the same class.
return result, kwargs
def _arithmetic_data(self, operation, operand, **kwds):
"""
Calculate the resulting data.
Parameters
----------
operation : callable
see `NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
kwds :
Additional parameters.
Returns
-------
result_data : ndarray or `~astropy.units.Quantity`
If both operands had no unit the resulting data is a simple numpy
array, but if any of the operands had a unit the return is a
Quantity.
"""
# Do the calculation with or without units
if self.unit is None and operand.unit is None:
result = operation(self.data, operand.data)
elif self.unit is None:
result = operation(
self.data << dimensionless_unscaled, operand.data << operand.unit
)
elif operand.unit is None:
result = operation(
self.data << self.unit, operand.data << dimensionless_unscaled
)
else:
result = operation(self.data << self.unit, operand.data << operand.unit)
return result
def _arithmetic_uncertainty(self, operation, operand, result, correlation, **kwds):
"""
Calculate the resulting uncertainty.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
result : `~astropy.units.Quantity` or `~numpy.ndarray`
The result of :meth:`NDArithmeticMixin._arithmetic_data`.
correlation : number or `~numpy.ndarray`
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters.
Returns
-------
result_uncertainty : `NDUncertainty` subclass instance or None
The resulting uncertainty already saved in the same `NDUncertainty`
subclass that ``self`` had (or ``operand`` if self had no
uncertainty). ``None`` only if both had no uncertainty.
"""
# Make sure these uncertainties are NDUncertainties so this kind of
# propagation is possible.
if self.uncertainty is not None and not isinstance(
self.uncertainty, NDUncertainty
):
raise TypeError(
"Uncertainty propagation is only defined for "
"subclasses of NDUncertainty."
)
if operand.uncertainty is not None and not isinstance(
operand.uncertainty, NDUncertainty
):
raise TypeError(
"Uncertainty propagation is only defined for "
"subclasses of NDUncertainty."
)
# Now do the uncertainty propagation
# TODO: There is no enforced requirement that actually forbids the
# uncertainty to have negative entries but with correlation the
# sign of the uncertainty DOES matter.
if self.uncertainty is None and operand.uncertainty is None:
# Neither has uncertainties so the result should have none.
return None
elif self.uncertainty is None:
# Create a temporary uncertainty to allow uncertainty propagation
# to yield the correct results. (issue #4152)
self.uncertainty = operand.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(
operation, operand, result, correlation
)
# Delete the temporary uncertainty again.
self.uncertainty = None
return result_uncert
elif operand.uncertainty is None:
# Same as the ``self.uncertainty is None`` branch above, but the other way around.
operand.uncertainty = self.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(
operation, operand, result, correlation
)
operand.uncertainty = None
return result_uncert
else:
# Both have uncertainties so just propagate.
return self.uncertainty.propagate(operation, operand, result, correlation)
def _arithmetic_mask(self, operation, operand, handle_mask, **kwds):
"""
Calculate the resulting mask.
By default this is implemented as the element-wise logical ``or`` operation
if both operands have a mask.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_mask : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_mask``.
Returns
-------
result_mask : any type
If only one mask was present this mask is returned.
If neither had a mask ``None`` is returned. Otherwise
``handle_mask`` must create (and copy) the returned mask.
"""
# If only one mask is present we need not bother about any type checks
if self.mask is None and operand.mask is None:
return None
elif self.mask is None:
# Make a copy so there is no reference in the result.
return deepcopy(operand.mask)
elif operand.mask is None:
return deepcopy(self.mask)
else:
# Now let's calculate the resulting mask (the operation enforces a copy)
return handle_mask(self.mask, operand.mask, **kwds)
def _arithmetic_wcs(self, operation, operand, compare_wcs, **kwds):
"""
Calculate the resulting wcs.
There is actually no calculation involved, but it is a good place to
compare the wcs information of both operands. This is currently not working
properly with `~astropy.wcs.WCS` (which is the suggested class for
storing the wcs property) but it will not break it either.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData` instance or subclass
The second operand wrapped in an instance of the same class as
self.
compare_wcs : callable
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters given to ``compare_wcs``.
Raises
------
ValueError
If ``compare_wcs`` returns ``False``.
Returns
-------
result_wcs : any type
The ``wcs`` of the first operand is returned.
"""
# ok, not really arithmetic but we need to check which wcs makes sense
# for the result and this is an ideal place to compare the two WCS,
# too.
# We assume that the comparison returns None or False in case they
# are not equal.
if not compare_wcs(self.wcs, operand.wcs, **kwds):
raise ValueError("WCS are not equal.")
return deepcopy(self.wcs)
def _arithmetic_meta(self, operation, operand, handle_meta, **kwds):
"""
Calculate the resulting meta.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_meta : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_meta``.
Returns
-------
result_meta : any type
The result of ``handle_meta``.
"""
# Just return what handle_meta does with both of the metas.
return handle_meta(self.meta, operand.meta, **kwds)
@sharedmethod
@format_doc(_arit_doc, name="addition", op="+")
def add(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(np.add, operand, operand2, **kwargs)
@sharedmethod
@format_doc(_arit_doc, name="subtraction", op="-")
def subtract(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.subtract, operand, operand2, **kwargs
)
@sharedmethod
@format_doc(_arit_doc, name="multiplication", op="*")
def multiply(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.multiply, operand, operand2, **kwargs
)
@sharedmethod
@format_doc(_arit_doc, name="division", op="/")
def divide(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.true_divide, operand, operand2, **kwargs
)
@sharedmethod
def _prepare_then_do_arithmetic(
self_or_cls, operation, operand, operand2, **kwargs
):
"""Intermediate method called by public arithmetic (i.e. ``add``)
before the processing method (``_arithmetic``) is invoked.
.. warning::
Do not override this method in subclasses.
This method checks if it was called as instance or as class method and
then wraps the operands and the result from ``_arithmetic`` in the
appropriate subclass.
Parameters
----------
self_or_cls : instance or class
``sharedmethod`` behaves like a normal method if called on the
instance (then this parameter is ``self``) but like a classmethod
when called on the class (then this parameter is ``cls``).
operation : callable
The operation (normally a numpy-ufunc) that represents the
appropriate action.
operand, operand2, kwargs :
See for example ``add``.
Returns
-------
result : `~astropy.nddata.NDData`-like
Depending on how this method was called, either ``self_or_cls``
(called on class) or ``self_or_cls.__class__`` (called on instance)
is the NDData-subclass that is used as wrapper for the result.
"""
# DO NOT OVERRIDE THIS METHOD IN SUBCLASSES.
if isinstance(self_or_cls, NDArithmeticMixin):
# True means it was called on the instance, so self_or_cls is
# a reference to self
cls = self_or_cls.__class__
if operand2 is None:
# Only one operand was given. Set operand2 to operand and
# operand to self so that we call the appropriate method of the
# operand.
operand2 = operand
operand = self_or_cls
else:
# Convert the first operand to the class of this method.
# This is important so that the correct ``_arithmetic`` method is
# always called later on.
operand = cls(operand)
else:
# It was used as classmethod so self_or_cls represents the cls
cls = self_or_cls
# It was called on the class so we expect two operands!
if operand2 is None:
raise TypeError(
"operand2 must be given when the method isn't "
"called on an instance."
)
# Convert to this class. See above comment why.
operand = cls(operand)
# At this point operand, operand2, kwargs and cls are determined.
# Let's try to convert operand2 to the class of operand to allow for
# arithmetic operations with numbers, lists, numpy arrays, numpy masked
# arrays, astropy quantities, masked quantities and other subclasses
# of NDData.
operand2 = cls(operand2)
# Now call the ``_arithmetic`` method to do the arithmetic.
result, init_kwds = operand._arithmetic(operation, operand2, **kwargs)
# Return a new class based on the result
return cls(result, **init_kwds)
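# A minimal sketch (assuming ``NDDataRef`` and ``StdDevUncertainty`` from
# ``astropy.nddata``) of the three ``propagate_uncertainties`` modes documented
# above: ``True`` propagates the uncertainties, ``False`` copies the first
# uncertainty found, and ``None`` drops the uncertainty from the result.
def _example_uncertainty_modes():
    from astropy.nddata import NDDataRef, StdDevUncertainty

    nd1 = NDDataRef([1.0, 2.0], uncertainty=StdDevUncertainty([0.1, 0.2]))
    nd2 = NDDataRef([3.0, 4.0], uncertainty=StdDevUncertainty([0.3, 0.4]))

    propagated = nd1.add(nd2).uncertainty  # element-wise sqrt(s1**2 + s2**2)
    copied = nd1.add(nd2, propagate_uncertainties=False).uncertainty  # copy of nd1's
    dropped = nd1.add(nd2, propagate_uncertainties=None).uncertainty  # None
    return propagated, copied, dropped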
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.nddata import (
CCDData,
Cutout2D,
NoOverlapError,
PartialOverlapError,
add_array,
extract_array,
overlap_slices,
subpixel_indices,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import WCS, Sip
from astropy.wcs.utils import proj_plane_pixel_area
test_positions = [
(10.52, 3.12),
(5.62, 12.97),
(31.33, 31.77),
(0.46, 0.94),
(20.45, 12.12),
(42.24, 24.42),
]
test_position_indices = [(0, 3), (0, 2), (4, 1), (4, 2), (4, 3), (3, 4)]
test_slices = [
slice(10.52, 3.12),
slice(5.62, 12.97),
slice(31.33, 31.77),
slice(0.46, 0.94),
slice(20.45, 12.12),
slice(42.24, 24.42),
]
subsampling = 5
test_pos_bad = [(-1, -4), (-2, 0), (6, 2), (6, 6)]
test_nonfinite_positions = [
(np.nan, np.nan),
(np.inf, np.inf),
(1, np.nan),
(np.nan, 2),
(2, -np.inf),
(-np.inf, 3),
]
def test_slices_different_dim():
"""Overlap from arrays with different number of dim is undefined."""
with pytest.raises(ValueError, match=".*the same number of dimensions.*"):
overlap_slices((4, 5, 6), (1, 2), (0, 0))
def test_slices_pos_different_dim():
"""Position must have same dim as arrays."""
with pytest.raises(ValueError, match=".*the same number of dimensions.*"):
overlap_slices((4, 5), (1, 2), (0, 0, 3))
@pytest.mark.parametrize("pos", test_pos_bad)
def test_slices_no_overlap(pos):
"""If there is no overlap between arrays, an error should be raised."""
with pytest.raises(NoOverlapError):
overlap_slices((5, 5), (2, 2), pos)
def test_slices_partial_overlap():
"""Compute a slice for partially overlapping arrays."""
temp = overlap_slices((5,), (3,), (0,))
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
temp = overlap_slices((5,), (3,), (0,), mode="partial")
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
for pos in [0, 4]:
with pytest.raises(
PartialOverlapError, match=".*Arrays overlap only partially.*"
):
temp = overlap_slices((5,), (3,), (pos,), mode="strict")
def test_slices_edges():
"""
Test overlap_slices when extracting along edges.
"""
slc_lg, slc_sm = overlap_slices((10, 10), (3, 3), (1, 1), mode="strict")
assert slc_lg[0].start == slc_lg[1].start == 0
assert slc_lg[0].stop == slc_lg[1].stop == 3
assert slc_sm[0].start == slc_sm[1].start == 0
assert slc_sm[0].stop == slc_sm[1].stop == 3
slc_lg, slc_sm = overlap_slices((10, 10), (3, 3), (8, 8), mode="strict")
assert slc_lg[0].start == slc_lg[1].start == 7
assert slc_lg[0].stop == slc_lg[1].stop == 10
assert slc_sm[0].start == slc_sm[1].start == 0
assert slc_sm[0].stop == slc_sm[1].stop == 3
# test (0, 0) shape
slc_lg, slc_sm = overlap_slices((10, 10), (0, 0), (0, 0))
assert slc_lg[0].start == slc_lg[0].stop == 0
assert slc_lg[1].start == slc_lg[1].stop == 0
assert slc_sm[0].start == slc_sm[0].stop == 0
assert slc_sm[1].start == slc_sm[1].stop == 0
slc_lg, slc_sm = overlap_slices((10, 10), (0, 0), (5, 5))
assert slc_lg[0].start == slc_lg[0].stop == 5
assert slc_lg[1].start == slc_lg[1].stop == 5
assert slc_sm[0].start == slc_sm[0].stop == 0
assert slc_sm[1].start == slc_sm[1].stop == 0
def test_slices_overlap_wrong_mode():
"""Call overlap_slices with non-existing mode."""
with pytest.raises(ValueError, match="^Mode can be only.*"):
overlap_slices((5,), (3,), (0,), mode="full")
@pytest.mark.parametrize("position", test_nonfinite_positions)
def test_slices_nonfinite_position(position):
"""
A ValueError should be raised if position contains a non-finite
value.
"""
with pytest.raises(ValueError):
overlap_slices((7, 7), (3, 3), position)
def test_extract_array_even_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
even-shaped extraction.
"""
data = np.arange(10)
shape = (2,)
positions_expected = [
(1.49, (1, 2)),
(1.5, (1, 2)),
(1.501, (1, 2)),
(1.99, (1, 2)),
(2.0, (1, 2)),
(2.01, (2, 3)),
(2.49, (2, 3)),
(2.5, (2, 3)),
(2.501, (2, 3)),
(2.99, (2, 3)),
(3.0, (2, 3)),
(3.01, (3, 4)),
]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos,), mode="partial")
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, 0)
exp2 = (0, 1)
expected = [exp1,] * 6 + [
exp2,
]
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos,), mode="partial", fill_value=-99)
assert_array_equal(out, exp)
def test_extract_array_odd_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
odd-shaped extraction.
"""
data = np.arange(10)
shape = (3,)
positions_expected = [
(1.49, (0, 1, 2)),
(1.5, (0, 1, 2)),
(1.501, (1, 2, 3)),
(1.99, (1, 2, 3)),
(2.0, (1, 2, 3)),
(2.01, (1, 2, 3)),
(2.49, (1, 2, 3)),
(2.5, (1, 2, 3)),
(2.501, (2, 3, 4)),
(2.99, (2, 3, 4)),
(3.0, (2, 3, 4)),
(3.01, (2, 3, 4)),
]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos,), mode="partial")
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, -99, 0)
exp2 = (-99, 0, 1)
expected = [exp1,] * 3 + [
exp2,
] * 4
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos,), mode="partial", fill_value=-99)
assert_array_equal(out, exp)
def test_extract_array_wrong_mode():
"""Call extract_array with non-existing mode."""
with pytest.raises(
ValueError, match="Valid modes are 'partial', 'trim', and 'strict'."
):
extract_array(np.arange(4), (2,), (0,), mode="full")
def test_extract_array_1d_even():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
"""
assert np.all(
extract_array(np.arange(4), (2,), (0,), fill_value=-99) == np.array([-99, 0])
)
for i in [1, 2, 3]:
assert np.all(extract_array(np.arange(4), (2,), (i,)) == np.array([i - 1, i]))
assert np.all(
extract_array(np.arange(4.0), (2,), (4,), fill_value=np.inf)
== np.array([3, np.inf])
)
def test_extract_array_1d_odd():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
The first few lines test the most error-prone part: Extraction of an
array on the boundaries.
Additional tests (e.g. dtype of return array) are done for the last
case only.
"""
assert np.all(
extract_array(np.arange(4), (3,), (-1,), fill_value=-99)
== np.array([-99, -99, 0])
)
assert np.all(
extract_array(np.arange(4), (3,), (0,), fill_value=-99) == np.array([-99, 0, 1])
)
for i in [1, 2]:
assert np.all(
extract_array(np.arange(4), (3,), (i,)) == np.array([i - 1, i, i + 1])
)
assert np.all(
extract_array(np.arange(4), (3,), (3,), fill_value=-99) == np.array([2, 3, -99])
)
arrayin = np.arange(4.0)
extracted = extract_array(arrayin, (3,), (4,))
assert extracted[0] == 3
assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan
assert extracted.dtype == arrayin.dtype
def test_extract_array_1d():
"""In 1d, shape can be int instead of tuple"""
assert np.all(
extract_array(np.arange(4), 3, (-1,), fill_value=-99) == np.array([-99, -99, 0])
)
assert np.all(
extract_array(np.arange(4), 3, -1, fill_value=-99) == np.array([-99, -99, 0])
)
def test_extract_Array_float():
"""integer is at bin center"""
for a in np.arange(2.51, 3.49, 0.1):
assert np.all(extract_array(np.arange(5), 3, a) == np.array([2, 3, 4]))
def test_extract_array_1d_trim():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
"""
assert np.all(extract_array(np.arange(4), (2,), (0,), mode="trim") == np.array([0]))
for i in [1, 2, 3]:
assert np.all(
extract_array(np.arange(4), (2,), (i,), mode="trim") == np.array([i - 1, i])
)
assert np.all(
extract_array(np.arange(4.0), (2,), (4,), mode="trim") == np.array([3])
)
@pytest.mark.parametrize("mode", ["partial", "trim", "strict"])
def test_extract_array_easy(mode):
"""
Test extract_array utility function.
Test by extracting an array of ones out of an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array[3:8, 3:8] = small_test_array
extracted_array = extract_array(large_test_array, (5, 5), (5, 5), mode=mode)
assert np.all(extracted_array == small_test_array)
def test_extract_array_return_pos():
"""Check that the return position is calculated correctly.
The result will differ by mode. All tests here are done in 1d because it's
easier to construct correct test cases.
"""
large_test_array = np.arange(5, dtype=float)
for i in np.arange(-1, 6):
extracted, new_pos = extract_array(
large_test_array, 3, i, mode="partial", return_position=True
)
assert new_pos == (1,)
# Now check an array with an even number
for i, expected in zip([1.49, 1.51, 3], [0.49, 0.51, 1]):
extracted, new_pos = extract_array(
large_test_array, (2,), (i,), mode="strict", return_position=True
)
assert new_pos == (expected,)
# For mode='trim' the answer depends on how much is trimmed at the edge
for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)):
extracted, new_pos = extract_array(
large_test_array, (3,), (i,), mode="trim", return_position=True
)
assert new_pos == (expected,)
def test_extract_array_nan_fillvalue():
if Version(np.__version__) >= Version("1.20"):
msg = "fill_value cannot be set to np.nan if the input array has"
with pytest.raises(ValueError, match=msg):
extract_array(
np.ones((10, 10), dtype=int), (5, 5), (1, 1), fill_value=np.nan
)
def test_add_array_odd_shape():
"""
Test add_array utility function.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[3:8, 3:8] += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref)
def test_add_array_even_shape():
"""
Test add_array utility function.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((4, 4))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4]
added_array = add_array(large_test_array, small_test_array, (0, 0))
assert np.all(added_array == large_test_array_ref)
def test_add_array_equal_shape():
"""
Test add_array utility function.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((11, 11))
large_test_array_ref = large_test_array.copy()
large_test_array_ref += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref)
@pytest.mark.parametrize(
("position", "subpixel_index"), zip(test_positions, test_position_indices)
)
def test_subpixel_indices(position, subpixel_index):
"""
Test subpixel_indices utility function.
Test by asserting that the function returns correct results for
given test values.
"""
assert np.all(subpixel_indices(position, subsampling) == subpixel_index)
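# Illustrative sketch (not part of the original test module): minimal,
# standalone usage of the overlap/extraction utilities exercised above. It
# relies on the module-level imports at the top of this file; the array values
# and positions are arbitrary examples.
def _example_overlap_and_extract():
    data = np.arange(25).reshape(5, 5)
    # Matching slices of the large array and of a 3x3 small array centred on (2, 2).
    slc_lg, slc_sm = overlap_slices(data.shape, (3, 3), (2, 2))
    # Extract a 3x3 cutout around (2, 2); mode='partial' pads with fill_value
    # wherever the requested region extends beyond the data.
    cutout = extract_array(data, (3, 3), (2, 2), mode="partial", fill_value=-1)
    # Add the cutout back onto a copy of the data at the same position.
    total = add_array(data.copy(), cutout, (2, 2))
    return slc_lg, slc_sm, cutout, total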
class TestCutout2D:
def setup_class(self):
self.data = np.arange(20.0).reshape(5, 4)
self.position = SkyCoord("13h11m29.96s -01d19m18.7s", frame="icrs")
wcs = WCS(naxis=2)
rho = np.pi / 3.0
scale = 0.05 / 3600.0
wcs.wcs.cd = [
[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)],
]
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crval = [
self.position.ra.to_value(u.deg),
self.position.dec.to_value(u.deg),
]
wcs.wcs.crpix = [3, 3]
self.wcs = wcs
# add SIP
sipwcs = wcs.deepcopy()
sipwcs.wcs.ctype = ["RA---TAN-SIP", "DEC--TAN-SIP"]
a = np.array(
[
[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0],
]
)
b = np.array(
[
[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0],
]
)
sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)
sipwcs.wcs.set()
self.sipwcs = sipwcs
def test_cutout(self):
sizes = [
3,
3 * u.pixel,
(3, 3),
(3 * u.pixel, 3 * u.pix),
(3.0, 3 * u.pixel),
(2.9, 3.3),
]
for size in sizes:
position = (2.1, 1.9)
c = Cutout2D(self.data, position, size)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 10
assert c.origin_original == (1, 1)
assert c.origin_cutout == (0, 0)
assert c.input_position_original == position
assert_allclose(c.input_position_cutout, (1.1, 0.9))
assert c.position_original == (2.0, 2.0)
assert c.position_cutout == (1.0, 1.0)
assert c.center_original == (2.0, 2.0)
assert c.center_cutout == (1.0, 1.0)
assert c.bbox_original == ((1, 3), (1, 3))
assert c.bbox_cutout == ((0, 2), (0, 2))
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_length(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (1, 1, 1))
def test_size_units(self):
for size in [3 * u.cm, (3, 3 * u.K)]:
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), size)
def test_size_pixel(self):
"""
Check size in derived pixel units.
"""
size = 0.3 * u.arcsec / (0.1 * u.arcsec / u.pixel)
c = Cutout2D(self.data, (2, 2), size)
assert c.data.shape == (3, 3)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_angle(self):
c = Cutout2D(self.data, (2, 2), (0.1 * u.arcsec), wcs=self.wcs)
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 3), slice(1, 3))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_size_angle_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec))
def test_cutout_trim_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode="trim")
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_cutout_partial_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode="partial")
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(1, 3), slice(1, 3))
def test_cutout_partial_overlap_fill_value(self):
fill_value = -99
c = Cutout2D(self.data, (0, 0), (3, 3), mode="partial", fill_value=fill_value)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.data[0, 0] == fill_value
def test_copy(self):
data = np.copy(self.data)
c = Cutout2D(data, (2, 3), (3, 3))
xy = (0, 0)
value = 100.0
c.data[xy] = value
xy_orig = c.to_original_position(xy)
yx = xy_orig[::-1]
assert data[yx] == value
data = np.copy(self.data)
c2 = Cutout2D(self.data, (2, 3), (3, 3), copy=True)
c2.data[xy] = value
assert data[yx] != value
def test_to_from_large(self):
position = (2, 2)
c = Cutout2D(self.data, position, (3, 3))
xy = (0, 0)
result = c.to_cutout_position(c.to_original_position(xy))
assert_allclose(result, xy)
def test_skycoord_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, self.position, (3, 3))
def test_skycoord(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs)
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_skycoord_partial(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs, mode="partial")
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_naxis_update(self):
xsize = 2
ysize = 3
c = Cutout2D(self.data, self.position, (ysize, xsize), wcs=self.wcs)
assert c.wcs.array_shape == (ysize, xsize)
def test_crpix_maps_to_crval(self):
w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs, mode="partial").wcs
pscale = np.sqrt(proj_plane_pixel_area(w))
assert_allclose(
w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
assert_allclose(
w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
def test_cutout_with_nddata_as_input(self):
# This is essentially a copy/paste of test_skycoord with the
# input a ccd with wcs attribute instead of passing the
# wcs separately.
ccd = CCDData(data=self.data, wcs=self.wcs, unit="adu")
c = Cutout2D(ccd, self.position, (3, 3))
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
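# Illustrative sketch (not part of the original test module): typical Cutout2D
# usage mirroring the tests above. The WCS keywords and sky position below are
# arbitrary example values.
if __name__ == "__main__":
    data = np.arange(100.0).reshape(10, 10)
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    wcs.wcs.crval = [197.89, -1.32]
    wcs.wcs.crpix = [5, 5]
    wcs.wcs.cdelt = [-0.001, 0.001]
    position = SkyCoord(197.89 * u.deg, -1.32 * u.deg, frame="icrs")
    cutout = Cutout2D(data, position, (3, 3), wcs=wcs)
    # The cutout carries an adjusted WCS so sky positions are preserved.
    print(cutout.data.shape, cutout.wcs.wcs.crpix)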
|
f0f6b8b6dc01c83c5044809eff00edc4f8eb24bd93f5251fc23a7d9920bb0e6b | from astropy.nddata import NDData, NDDataRef, NDIOMixin # noqa: F401
# Alias NDDataAllMixins in case this will be renamed ... :-)
NDDataIO = NDDataRef
def test_simple_write_read():
ndd = NDDataIO([1, 2, 3])
assert hasattr(ndd, "read")
assert hasattr(ndd, "write")
|
149f3f44e6b3a240eff1b1a059673ea9d876966b5d8384a89a10f25928bc029b | from .high_level_api import HighLevelWCSMixin
from .low_level_api import BaseLowLevelWCS
from .utils import wcs_info_str
__all__ = ["HighLevelWCSWrapper"]
class HighLevelWCSWrapper(HighLevelWCSMixin):
"""
Wrapper class that can take any :class:`~astropy.wcs.wcsapi.BaseLowLevelWCS`
object and expose the high-level WCS API.
"""
def __init__(self, low_level_wcs):
if not isinstance(low_level_wcs, BaseLowLevelWCS):
raise TypeError(
"Input to a HighLevelWCSWrapper must be a low level WCS object"
)
self._low_level_wcs = low_level_wcs
@property
def low_level_wcs(self):
return self._low_level_wcs
@property
def pixel_n_dim(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`.
"""
return self.low_level_wcs.pixel_n_dim
@property
def world_n_dim(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`.
"""
return self.low_level_wcs.world_n_dim
@property
def world_axis_physical_types(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
"""
return self.low_level_wcs.world_axis_physical_types
@property
def world_axis_units(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`.
"""
return self.low_level_wcs.world_axis_units
@property
def array_shape(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape`.
"""
return self.low_level_wcs.array_shape
@property
def pixel_bounds(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_bounds`.
"""
return self.low_level_wcs.pixel_bounds
@property
def axis_correlation_matrix(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS.axis_correlation_matrix`.
"""
return self.low_level_wcs.axis_correlation_matrix
def _as_mpl_axes(self):
"""
See `~astropy.wcs.wcsapi.BaseLowLevelWCS._as_mpl_axes`.
"""
return self.low_level_wcs._as_mpl_axes()
def __str__(self):
return wcs_info_str(self.low_level_wcs)
def __repr__(self):
return f"{object.__repr__(self)}\n{str(self)}"
|
33386770e3bc2517cc44fb648db1cfb8d591346d30efcd1423c43ff78ef417ec | import abc
import os
import numpy as np
__all__ = ["BaseLowLevelWCS", "validate_physical_types"]
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the low-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
@property
@abc.abstractmethod
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
@property
@abc.abstractmethod
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
@property
@abc.abstractmethod
def world_axis_units(self):
"""
An iterable of strings given the units of the world coordinates for each
axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
@abc.abstractmethod
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
input, and pixel coordinates should be zero-based. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
assumed to be 0 at the center of the first pixel in each dimension. If a
pixel is in a region where the WCS is not defined, NaN can be returned.
The coordinates should be specified in the ``(x, y)`` order, where for
an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
return self.pixel_to_world_values(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a world coordinate does not have a matching pixel
coordinate, NaN can be returned. The coordinates should be returned in
the ``(x, y)`` order, where for an image, ``x`` is the horizontal
coordinate and ``y`` is the vertical coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
pixel_arrays = self.world_to_pixel_values(*world_arrays)
if self.pixel_n_dim == 1:
pixel_arrays = (pixel_arrays,)
else:
pixel_arrays = pixel_arrays[::-1]
array_indices = tuple(
np.asarray(np.floor(pixel + 0.5), dtype=np.int_) for pixel in pixel_arrays
)
return array_indices[0] if self.pixel_n_dim == 1 else array_indices
@property
@abc.abstractmethod
def world_axis_object_components(self):
"""
A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
on constructing high-level objects for the world coordinates.
Each element of the list is a tuple with three items:
* The first is a name for the world object this world array
corresponds to, which *must* match the string names used in
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
appear twice because two world arrays might correspond to a single
world object (e.g. a celestial coordinate might have both “ra” and
“dec” arrays, which correspond to a single sky coordinate object).
* The second element is either a string keyword argument name or a
positional index for the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
* The third argument is a string giving the name of the property
to access on the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in
order to get numerical values. Alternatively, this argument can be a
callable Python object that takes a high-level coordinate object and
returns the numerical values suitable for passing to the low-level
WCS transformation methods.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
@property
@abc.abstractmethod
def world_axis_object_classes(self):
"""
A dictionary giving information on constructing high-level objects for
the world coordinates.
Each key of the dictionary is a string key from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
tuple with three elements or four elements:
* The first element of the tuple must be a class or a string specifying
the fully-qualified name of a class, which will specify the actual
Python object to be created.
* The second element should be a tuple specifying the positional
arguments required to initialize the class. If
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
world coordinates should be passed as a positional argument, this
tuple should include `None` placeholders for the world coordinates.
* The third tuple element must be a dictionary with the keyword
arguments required to initialize the class.
* Optionally, for advanced use cases, the fourth element (if present)
should be a callable Python object that gets called instead of the
class and gets passed the positional and keyword arguments. It should
return an object of the type of the first element in the tuple.
Note that we don't require the classes to be Astropy classes since there
is no guarantee that Astropy will have all the classes to represent all
kinds of world coordinates. Furthermore, we recommend that the output be
kept as human-readable as possible.
The classes used here should have the ability to do conversions by
passing an instance as the first argument to the same class with
different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
a requirement for the implementation of the high-level interface.
The second and third tuple elements for each value of this dictionary
can in turn contain either instances of classes, or if necessary can
contain serialized versions that should take the same form as the main
classes described above (a tuple with three elements with the fully
qualified name of the class, then the positional arguments and the
keyword arguments). For low-level API objects implemented in Python, we
recommend simply returning the actual objects (not the serialized form)
for optimal performance. Implementations should either always or never
use serialized classes to represent Python objects, and should indicate
which of these they follow using the
`~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
# The following three properties have default fallback implementations, so
# they are not abstract.
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
order (the convention for arrays in Python).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
"""
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
order (where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
If you are interested in getting a shape that is comparable to that of
a Numpy array, you should use
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
"""
return None
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if the bounds are not known or relevant.
"""
return None
@property
def pixel_axis_names(self):
"""
An iterable of strings describing the name for each pixel axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized.
"""
return [""] * self.pixel_n_dim
@property
def world_axis_names(self):
"""
An iterable of strings describing the name for each world axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized. For standardized axis types, see
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
"""
return [""] * self.world_n_dim
@property
def axis_correlation_matrix(self):
"""
Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that
indicates using booleans whether a given world coordinate depends on a
given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence
of any further information. For completely independent axes, the
diagonal would be `True` and all other entries `False`.
"""
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
def _as_mpl_axes(self):
"""Compatibility hook for Matplotlib and WCSAxes.
With this method, one can do::
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from astropy.visualization.wcsaxes import WCSAxes
return WCSAxes, {"wcs": self}
UCDS_FILE = os.path.join(os.path.dirname(__file__), "data", "ucds.txt")
with open(UCDS_FILE) as f:
VALID_UCDS = {x.strip() for x in f.read().splitlines()[1:]}
def validate_physical_types(physical_types):
"""
Validate a list of physical types against the UCD1+ standard.
"""
for physical_type in physical_types:
if (
physical_type is not None
and physical_type not in VALID_UCDS
and not physical_type.startswith("custom:")
):
raise ValueError(
f"'{physical_type}' is not a valid IOVA UCD1+ physical type. It must be"
" a string specified in the list"
" (http://www.ivoa.net/documents/latest/UCDlist.html) or if no"
" matching type exists it can be any string prepended with 'custom:'."
)
|
a046075e3d596c5d2d087a88d8571b731ac6d2ee602d370fbbff49a69342e3db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import numpy as np
__all__ = ["deserialize_class", "wcs_info_str"]
def deserialize_class(tpl, construct=True):
"""
Deserialize classes recursively.
"""
if not isinstance(tpl, tuple) or len(tpl) != 3:
raise ValueError("Expected a tuple of three values")
module, klass = tpl[0].rsplit(".", 1)
module = importlib.import_module(module)
klass = getattr(module, klass)
args = tuple(
deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1]
)
kwargs = dict(
(key, deserialize_class(val)) if isinstance(val, tuple) else (key, val)
for (key, val) in tpl[2].items()
)
if construct:
return klass(*args, **kwargs)
else:
return klass, args, kwargs
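# Illustrative sketch (not part of the original module): ``deserialize_class``
# expects a 3-tuple of (fully qualified class name, positional args, keyword
# args), and nested tuples are deserialized recursively. The Quantity example
# below assumes astropy.units is importable.
def _example_deserialize():
    serialized = ("astropy.units.Quantity", (1.0,), {"unit": "deg"})
    # construct=True (the default) instantiates Quantity(1.0, unit="deg")
    quantity = deserialize_class(serialized)
    # construct=False returns the class plus the prepared args/kwargs
    klass, args, kwargs = deserialize_class(serialized, construct=False)
    return quantity, klass, args, kwargs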
def wcs_info_str(wcs):
# Overall header
s = f"{wcs.__class__.__name__} Transformation\n\n"
s += "This transformation has {} pixel and {} world dimensions\n\n".format(
wcs.pixel_n_dim, wcs.world_n_dim
)
s += f"Array shape (Numpy order): {wcs.array_shape}\n\n"
# Pixel dimensions table
array_shape = wcs.array_shape or (0,)
pixel_shape = wcs.pixel_shape or (None,) * wcs.pixel_n_dim
# Find largest between header size and value length
pixel_dim_width = max(9, len(str(wcs.pixel_n_dim)))
pixel_nam_width = max(9, max(len(x) for x in wcs.pixel_axis_names))
pixel_siz_width = max(9, len(str(max(array_shape))))
# fmt: off
s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +
'Bounds\n')
# fmt: on
for ipix in range(wcs.pixel_n_dim):
# fmt: off
s += (('{0:' + str(pixel_dim_width) + 'g}').format(ipix) + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format(wcs.pixel_axis_names[ipix] or 'None') + ' ' +
(" " * 5 + str(None) if pixel_shape[ipix] is None else
('{0:' + str(pixel_siz_width) + 'g}').format(pixel_shape[ipix])) + ' ' +
'{:s}'.format(str(None if wcs.pixel_bounds is None else wcs.pixel_bounds[ipix]) + '\n'))
# fmt: on
s += "\n"
# World dimensions table
# Find largest between header size and value length
world_dim_width = max(9, len(str(wcs.world_n_dim)))
world_nam_width = max(
9, max(len(x) if x is not None else 0 for x in wcs.world_axis_names)
)
world_typ_width = max(
13, max(len(x) if x is not None else 0 for x in wcs.world_axis_physical_types)
)
# fmt: off
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +
('{0:' + str(world_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +
'Units\n')
# fmt: on
for iwrl in range(wcs.world_n_dim):
name = wcs.world_axis_names[iwrl] or "None"
typ = wcs.world_axis_physical_types[iwrl] or "None"
unit = wcs.world_axis_units[iwrl] or "unknown"
# fmt: off
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
('{0:' + str(world_nam_width) + 's}').format(name) + ' ' +
('{0:' + str(world_typ_width) + 's}').format(typ) + ' ' +
'{:s}'.format(unit + '\n'))
# fmt: on
s += "\n"
# Axis correlation matrix
pixel_dim_width = max(3, len(str(wcs.world_n_dim)))
s += "Correlation between pixel and world axes:\n\n"
# fmt: off
s += (' ' * world_dim_width + ' ' +
('{0:^' + str(wcs.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +
'\n')
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +
''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# fmt: on
matrix = wcs.axis_correlation_matrix
matrix_str = np.empty(matrix.shape, dtype="U3")
matrix_str[matrix] = "yes"
matrix_str[~matrix] = "no"
for iwrl in range(wcs.world_n_dim):
# fmt: off
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +
''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# fmt: on
# Make sure we get rid of the extra whitespace at the end of some lines
return "\n".join([l.rstrip() for l in s.splitlines()])
|
9e3692370139b810029bce41113380fee7b552baa1b6ab54847ceddb546a7c19 | import abc
from collections import OrderedDict, defaultdict
import numpy as np
from .utils import deserialize_class
__all__ = ["BaseHighLevelWCS", "HighLevelWCSMixin"]
def rec_getattr(obj, att):
for a in att.split("."):
obj = getattr(obj, a)
return obj
def default_order(components):
order = []
for key, _, _ in components:
if key not in order:
order.append(key)
return order
def _toindex(value):
"""Convert value to an int or an int array.
Input coordinates converted to integers
corresponding to the center of the pixel.
The convention is that the center of the pixel is
(0, 0), while the lower left corner is (-0.5, -0.5).
The outputs are used to index the mask.
Examples
--------
>>> _toindex(np.array([-0.5, 0.49999]))
array([0, 0])
>>> _toindex(np.array([0.5, 1.49999]))
array([1, 1])
>>> _toindex(np.array([1.5, 2.49999]))
array([2, 2])
"""
indx = np.asarray(np.floor(np.asarray(value) + 0.5), dtype=int)
return indx
class BaseHighLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the high-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def low_level_wcs(self):
"""
Returns a reference to the underlying low-level WCS object.
"""
@abc.abstractmethod
def pixel_to_world(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates (represented by
high-level objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` for pixel
indexing and ordering conventions.
"""
def array_index_to_world(self, *index_arrays):
"""
Convert array indices to world coordinates (represented by Astropy
objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_index_to_world_values` for
pixel indexing and ordering conventions.
"""
return self.pixel_to_world(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to pixel
coordinates.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` for pixel
indexing and ordering conventions.
"""
def world_to_array_index(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to array
indices.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_array_index_values` for
pixel indexing and ordering conventions. The indices should be returned
as rounded integers.
"""
if self.pixel_n_dim == 1:
return _toindex(self.world_to_pixel(*world_objects))
else:
return tuple(_toindex(self.world_to_pixel(*world_objects)[::-1]).tolist())
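# Illustrative sketch (not part of the original module): the difference between
# pixel ``(x, y)`` and array-index ``(row, column)`` ordering in the high-level
# API, shown with astropy's FITS WCS (the CTYPE/CRPIX values are examples only).
def _example_pixel_vs_array_index():
    from astropy.wcs import WCS

    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    wcs.wcs.crpix = [10.0, 5.0]

    # The same location expressed in the two argument orders gives the same
    # SkyCoord.
    world_from_pixel = wcs.pixel_to_world(9, 4)        # (x, y)
    world_from_index = wcs.array_index_to_world(4, 9)  # (row, column)
    return world_from_pixel, world_from_index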
def high_level_objects_to_values(*world_objects, low_level_wcs):
"""
Convert the input high level object to low level values.
This function uses the information in ``wcs.world_axis_object_classes`` and
``wcs.world_axis_object_components`` to convert the high level objects
(such as `~.SkyCoord`) to low level "values" `~.Quantity` objects.
This is used in `.HighLevelWCSMixin.world_to_pixel`, but provided as a
separate function for use in other places where needed.
Parameters
----------
*world_objects: object
High level coordinate objects.
low_level_wcs: `.BaseLowLevelWCS`
The WCS object to use to interpret the coordinates.
"""
# Cache the classes and components since this may be expensive
serialized_classes = low_level_wcs.world_axis_object_classes
components = low_level_wcs.world_axis_object_components
# Deserialize world_axis_object_classes using the default order
classes = OrderedDict()
for key in default_order(components):
if low_level_wcs.serialized_classes:
classes[key] = deserialize_class(serialized_classes[key], construct=False)
else:
classes[key] = serialized_classes[key]
# Check that the number of classes matches the number of inputs
if len(world_objects) != len(classes):
raise ValueError(
f"Number of world inputs ({len(world_objects)}) does not match expected"
f" ({len(classes)})"
)
# Determine whether the classes are uniquely matched, that is we check
# whether there is only one of each class.
world_by_key = {}
unique_match = True
for w in world_objects:
matches = []
for key, (klass, *_) in classes.items():
if isinstance(w, klass):
matches.append(key)
if len(matches) == 1:
world_by_key[matches[0]] = w
else:
unique_match = False
break
# If the match is not unique, the order of the classes needs to match,
# whereas if all classes are unique, we can still intelligently match
# them even if the order is wrong.
objects = {}
if unique_match:
for key, (klass, args, kwargs, *rest) in classes.items():
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(world_by_key[key], SkyCoord):
if "frame" in kwargs:
objects[key] = world_by_key[key].transform_to(kwargs["frame"])
else:
objects[key] = world_by_key[key]
else:
objects[key] = klass_gen(world_by_key[key], *args, **kwargs)
else:
for ikey, key in enumerate(classes):
klass, args, kwargs, *rest = classes[key]
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
w = world_objects[ikey]
if not isinstance(w, klass):
raise ValueError(
"Expected the following order of world arguments:"
f" {', '.join([k.__name__ for (k, _, _) in classes.values()])}"
)
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(w, SkyCoord):
if "frame" in kwargs:
objects[key] = w.transform_to(kwargs["frame"])
else:
objects[key] = w
else:
objects[key] = klass_gen(w, *args, **kwargs)
# We now extract the attributes needed for the world values
world = []
for key, _, attr in components:
if callable(attr):
world.append(attr(objects[key]))
else:
world.append(rec_getattr(objects[key], attr))
return world
def values_to_high_level_objects(*world_values, low_level_wcs):
"""
Convert low level values into high level objects.
This function uses the information in ``wcs.world_axis_object_classes`` and
``wcs.world_axis_object_components`` to convert low level "values"
`~.Quantity` objects to high level objects (such as `~.SkyCoord`).
This is used in `.HighLevelWCSMixin.pixel_to_world`, but provided as a
separate function for use in other places where needed.
Parameters
----------
*world_values: object
Low level, "values" representations of the world coordinates.
low_level_wcs: `.BaseLowLevelWCS`
The WCS object to use to interpret the coordinates.
"""
# Cache the classes and components since this may be expensive
components = low_level_wcs.world_axis_object_components
classes = low_level_wcs.world_axis_object_classes
# Deserialize classes
if low_level_wcs.serialized_classes:
classes_new = {}
for key, value in classes.items():
classes_new[key] = deserialize_class(value, construct=False)
classes = classes_new
args = defaultdict(list)
kwargs = defaultdict(dict)
for i, (key, attr, _) in enumerate(components):
if isinstance(attr, str):
kwargs[key][attr] = world_values[i]
else:
while attr > len(args[key]) - 1:
args[key].append(None)
args[key][attr] = world_values[i]
result = []
for key in default_order(components):
klass, ar, kw, *rest = classes[key]
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
result.append(klass_gen(*args[key], *ar, **kwargs[key], **kw))
return result
class HighLevelWCSMixin(BaseHighLevelWCS):
"""
Mix-in class that automatically provides the high-level WCS API for the
low-level WCS object given by the `~HighLevelWCSMixin.low_level_wcs`
property.
"""
@property
def low_level_wcs(self):
return self
def world_to_pixel(self, *world_objects):
world_values = high_level_objects_to_values(
*world_objects, low_level_wcs=self.low_level_wcs
)
# Finally we convert to pixel coordinates
pixel_values = self.low_level_wcs.world_to_pixel_values(*world_values)
return pixel_values
def pixel_to_world(self, *pixel_arrays):
# Compute the world coordinate values
world_values = self.low_level_wcs.pixel_to_world_values(*pixel_arrays)
if self.world_n_dim == 1:
world_values = (world_values,)
pixel_values = values_to_high_level_objects(
*world_values, low_level_wcs=self.low_level_wcs
)
if len(pixel_values) == 1:
return pixel_values[0]
else:
return pixel_values
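# Illustrative sketch (not part of the original module): round-tripping between
# low-level values and high-level objects with the helper functions above,
# using astropy's FITS WCS as the low-level object. The CTYPE/CRVAL values are
# arbitrary examples.
if __name__ == "__main__":
    from astropy.wcs import WCS

    fits_wcs = WCS(naxis=2)
    fits_wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    fits_wcs.wcs.crval = [10.0, 20.0]

    # Low-level values (degrees) -> a single high-level SkyCoord
    (skycoord,) = values_to_high_level_objects(10.0, 20.0, low_level_wcs=fits_wcs)

    # High-level SkyCoord -> low-level values (degrees)
    ra, dec = high_level_objects_to_values(skycoord, low_level_wcs=fits_wcs)
    print(skycoord, ra, dec)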
|
6135d34e341cb9df086faf07d9379aeb6ccbdc898f3231d1532c67aa7163584d | # This file includes the definition of a mix-in class that provides the low-
# and high-level WCS API to the astropy.wcs.WCS object. We keep this code
# isolated in this mix-in class to avoid making the main wcs.py file too
# long.
import warnings
import numpy as np
from astropy import units as u
from astropy.constants import c
from astropy.coordinates import ICRS, Galactic, SpectralCoord
from astropy.coordinates.spectral_coordinate import (
attach_zero_velocities,
update_differentials_to_match,
)
from astropy.utils.exceptions import AstropyUserWarning
from .high_level_api import HighLevelWCSMixin
from .low_level_api import BaseLowLevelWCS
from .wrappers import SlicedLowLevelWCS
__all__ = ["custom_ctype_to_ucd_mapping", "SlicedFITSWCS", "FITSWCSAPIMixin"]
C_SI = c.si.value
VELOCITY_FRAMES = {
"GEOCENT": "gcrs",
"BARYCENT": "icrs",
"HELIOCENT": "hcrs",
"LSRK": "lsrk",
"LSRD": "lsrd",
}
# The spectra velocity frames below are needed for FITS spectral WCS
# (see Greisen 06 table 12) but aren't yet defined as real
# astropy.coordinates frames, so we instead define them here as instances
# of existing coordinate frames with offset velocities. In future we should
# make these real frames so that users can more easily recognize these
# velocity frames when used in SpectralCoord.
# This frame is defined as a velocity of 220 km/s in the
# direction of l=90, b=0. The rotation velocity is defined
# in:
#
# Kerr and Lynden-Bell 1986, Review of galactic constants.
#
# NOTE: this may differ from the assumptions of galcen_v_sun
# in the Galactocentric frame - the value used here is
# the one adopted by the WCS standard for spectral
# transformations.
VELOCITY_FRAMES["GALACTOC"] = Galactic(
u=0 * u.km,
v=0 * u.km,
w=0 * u.km,
U=0 * u.km / u.s,
V=-220 * u.km / u.s,
W=0 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
# This frame is defined as a velocity of 300 km/s in the
# direction of l=90, b=0. This is defined in:
#
# Transactions of the IAU Vol. XVI B Proceedings of the
# 16th General Assembly, Reports of Meetings of Commissions:
# Comptes Rendus Des Séances Des Commissions, Commission 28,
# p201.
#
# Note that these values differ from those used by CASA
# (308 km/s towards l=105, b=-7) but we use the above values
# since these are the ones defined in Greisen et al (2006).
VELOCITY_FRAMES["LOCALGRP"] = Galactic(
u=0 * u.km,
v=0 * u.km,
w=0 * u.km,
U=0 * u.km / u.s,
V=-300 * u.km / u.s,
W=0 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
# This frame is defined as a velocity of 368 km/s in the
# direction of l=263.85, b=48.25. This is defined in:
#
# Bennett et al. (2003), First-Year Wilkinson Microwave
# Anisotropy Probe (WMAP) Observations: Preliminary Maps
# and Basic Results
#
# Note that in that paper, the dipole is expressed as a
# temperature (T=3.346 +/- 0.017mK)
VELOCITY_FRAMES["CMBDIPOL"] = Galactic(
l=263.85 * u.deg,
b=48.25 * u.deg,
distance=0 * u.km,
radial_velocity=-(3.346e-3 / 2.725 * c).to(u.km / u.s),
)
# Mapping from CTYPE axis name to UCD1
CTYPE_TO_UCD1 = {
# Celestial coordinates
"RA": "pos.eq.ra",
"DEC": "pos.eq.dec",
"GLON": "pos.galactic.lon",
"GLAT": "pos.galactic.lat",
"ELON": "pos.ecliptic.lon",
"ELAT": "pos.ecliptic.lat",
"TLON": "pos.bodyrc.lon",
"TLAT": "pos.bodyrc.lat",
"HPLT": "custom:pos.helioprojective.lat",
"HPLN": "custom:pos.helioprojective.lon",
"HPRZ": "custom:pos.helioprojective.z",
"HGLN": "custom:pos.heliographic.stonyhurst.lon",
"HGLT": "custom:pos.heliographic.stonyhurst.lat",
"CRLN": "custom:pos.heliographic.carrington.lon",
"CRLT": "custom:pos.heliographic.carrington.lat",
"SOLX": "custom:pos.heliocentric.x",
"SOLY": "custom:pos.heliocentric.y",
"SOLZ": "custom:pos.heliocentric.z",
# Spectral coordinates (WCS paper 3)
"FREQ": "em.freq", # Frequency
"ENER": "em.energy", # Energy
"WAVN": "em.wavenumber", # Wavenumber
"WAVE": "em.wl", # Vacuum wavelength
"VRAD": "spect.dopplerVeloc.radio", # Radio velocity
"VOPT": "spect.dopplerVeloc.opt", # Optical velocity
"ZOPT": "src.redshift", # Redshift
"AWAV": "em.wl", # Air wavelength
"VELO": "spect.dopplerVeloc", # Apparent radial velocity
"BETA": "custom:spect.doplerVeloc.beta", # Beta factor (v/c)
"STOKES": "phys.polarization.stokes", # STOKES parameters
# Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
"TIME": "time",
"TAI": "time",
"TT": "time",
"TDT": "time",
"ET": "time",
"IAT": "time",
"UT1": "time",
"UTC": "time",
"GMT": "time",
"GPS": "time",
"TCG": "time",
"TCB": "time",
"TDB": "time",
"LOCAL": "time",
# Distance coordinates
"DIST": "pos.distance",
"DSUN": "custom:pos.distance.sunToObserver"
# UT() and TT() are handled separately in world_axis_physical_types
}
# Keep a list of additional custom mappings that have been registered. This
# is kept as a list in case nested context managers are used
CTYPE_TO_UCD1_CUSTOM = []
class custom_ctype_to_ucd_mapping:
"""
A context manager that makes it possible to temporarily add new CTYPE to
UCD1+ mapping used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.
Parameters
----------
mapping : dict
A dictionary mapping a CTYPE value to a UCD1+ value
Examples
--------
Consider a WCS with the following CTYPE::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=1)
>>> wcs.wcs.ctype = ['SPAM']
By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,
but this can be overridden::
>>> wcs.world_axis_physical_types
[None]
>>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
... wcs.world_axis_physical_types
['food.spam']
"""
def __init__(self, mapping):
CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)
self.mapping = mapping
def __enter__(self):
pass
def __exit__(self, type, value, tb):
CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)
class SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):
pass
class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API.
"""
@property
def pixel_n_dim(self):
return self.naxis
@property
def world_n_dim(self):
return len(self.wcs.ctype)
@property
def array_shape(self):
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@array_shape.setter
def array_shape(self, value):
if value is None:
self.pixel_shape = None
else:
self.pixel_shape = value[::-1]
@property
def pixel_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError(
f"The number of data axes, {self.naxis}, does not equal the shape"
f" {len(value)}."
)
self._naxis = list(value)
@property
def pixel_bounds(self):
return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError(
"The number of data axes, "
f"{self.naxis}, does not equal the number of "
f"pixel bounds {len(value)}."
)
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
# TODO: need to support e.g. TT(TAI)
for ctype in self.wcs.ctype:
if ctype.upper().startswith(("UT(", "TT(")):
types.append("time")
else:
ctype_name = ctype.split("-")[0]
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if ctype_name in custom_mapping:
types.append(custom_mapping[ctype_name])
break
else:
types.append(CTYPE_TO_UCD1.get(ctype_name.upper(), None))
return types
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ""
elif isinstance(unit, u.Unit):
unit = unit.to_string(format="vounit")
else:
try:
unit = u.Unit(unit).to_string(format="vounit")
except u.UnitsError:
unit = ""
units.append(unit)
return units
@property
def world_axis_names(self):
return list(self.wcs.cname)
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix would be given by whether or not the PC matrix is zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these can assume correlations because of spherical distortions. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
def pixel_to_world_values(self, *pixel_arrays):
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def world_to_pixel_values(self, *world_arrays):
# avoid circular import
from astropy.wcs.wcs import NoConvergence
try:
pixel = self.all_world2pix(*world_arrays, 0)
except NoConvergence as e:
warnings.warn(str(e))
# use best_solution contained in the exception and format the same
# way as all_world2pix does (using _array_converter)
pixel = self._array_converter(
lambda *args: e.best_solution, "input", *world_arrays, 0
)
return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)
@property
def world_axis_object_components(self):
return self._get_components_and_classes()[0]
@property
def world_axis_object_classes(self):
return self._get_components_and_classes()[1]
@property
def serialized_classes(self):
return False
def _get_components_and_classes(self):
# The aim of this function is to return whatever is needed for
# world_axis_object_components and world_axis_object_classes. It's easier
# to figure it out in one go and then return the values and let the
# properties return part of it.
# Since this method might get called quite a few times, we need to cache
# it. We start off by defining a hash based on the attributes of the
# WCS that matter here (we can't just use the WCS object as a hash since
# it is mutable)
wcs_hash = (
self.naxis,
list(self.wcs.ctype),
list(self.wcs.cunit),
self.wcs.radesys,
self.wcs.specsys,
self.wcs.equinox,
self.wcs.dateobs,
self.wcs.lng,
self.wcs.lat,
)
# If the cache is present, we need to check that the 'hash' matches.
if getattr(self, "_components_and_classes_cache", None) is not None:
cache = self._components_and_classes_cache
if cache[0] == wcs_hash:
return cache[1]
else:
self._components_and_classes_cache = None
# Avoid circular imports by importing here
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.time import Time, TimeDelta
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.wcs.utils import wcs_to_celestial_frame
components = [None] * self.naxis
classes = {}
# Let's start off by checking whether the WCS has a pair of celestial
# components
if self.has_celestial:
try:
celestial_frame = wcs_to_celestial_frame(self)
except ValueError:
# Some WCSes, e.g. solar, can be recognized by WCSLIB as being
# celestial but we don't necessarily have frames for them.
celestial_frame = None
else:
kwargs = {}
kwargs["frame"] = celestial_frame
kwargs["unit"] = u.deg
classes["celestial"] = (SkyCoord, (), kwargs)
components[self.wcs.lng] = ("celestial", 0, "spherical.lon.degree")
components[self.wcs.lat] = ("celestial", 1, "spherical.lat.degree")
# Next, we check for spectral components
if self.has_spectral:
# Find index of spectral coordinate
ispec = self.wcs.spec
ctype = self.wcs.ctype[ispec][:4]
ctype = ctype.upper()
kwargs = {}
# Determine observer location and velocity
# TODO: determine how WCS standard would deal with observer on a
# spacecraft far from earth. For now assume the obsgeo parameters,
# if present, give the geocentric observer location.
if np.isnan(self.wcs.obsgeo[0]):
observer = None
else:
earth_location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
# Get the time scale from TIMESYS or fall back to 'utc'
tscale = self.wcs.timesys or "utc"
if np.isnan(self.wcs.mjdavg):
obstime = Time(
self.wcs.mjdobs,
format="mjd",
scale=tscale,
location=earth_location,
)
else:
obstime = Time(
self.wcs.mjdavg,
format="mjd",
scale=tscale,
location=earth_location,
)
observer_location = SkyCoord(earth_location.get_itrs(obstime=obstime))
if self.wcs.specsys in VELOCITY_FRAMES:
frame = VELOCITY_FRAMES[self.wcs.specsys]
observer = observer_location.transform_to(frame)
if isinstance(frame, str):
observer = attach_zero_velocities(observer)
else:
observer = update_differentials_to_match(
observer_location,
VELOCITY_FRAMES[self.wcs.specsys],
preserve_observer_frame=True,
)
elif self.wcs.specsys == "TOPOCENT":
observer = attach_zero_velocities(observer_location)
else:
raise NotImplementedError(
f"SPECSYS={self.wcs.specsys} not yet supported"
)
# Determine target
            # This is trickier. In principle the target for each pixel is the
# celestial coordinates of the pixel, but we then need to be very
# careful about SSYSOBS which is tricky. For now, we set the
# target using the reference celestial coordinate in the WCS (if
# any).
if self.has_celestial and celestial_frame is not None:
# NOTE: celestial_frame was defined higher up
# NOTE: we set the distance explicitly to avoid warnings in SpectralCoord
target = SkyCoord(
self.wcs.crval[self.wcs.lng] * self.wcs.cunit[self.wcs.lng],
self.wcs.crval[self.wcs.lat] * self.wcs.cunit[self.wcs.lat],
frame=celestial_frame,
distance=1000 * u.kpc,
)
target = attach_zero_velocities(target)
else:
target = None
# SpectralCoord does not work properly if either observer or target
# are not convertible to ICRS, so if this is the case, we (for now)
# drop the observer and target from the SpectralCoord and warn the
# user.
if observer is not None:
try:
observer.transform_to(ICRS())
except Exception:
warnings.warn(
"observer cannot be converted to ICRS, so will "
"not be set on SpectralCoord",
AstropyUserWarning,
)
observer = None
if target is not None:
try:
target.transform_to(ICRS())
except Exception:
warnings.warn(
"target cannot be converted to ICRS, so will "
"not be set on SpectralCoord",
AstropyUserWarning,
)
target = None
# NOTE: below we include Quantity in classes['spectral'] instead
# of SpectralCoord - this is because we want to also be able to
# accept plain quantities.
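            # For example (illustrative): for a ZOPT axis the entries below end
            # up as
            #     classes["spectral"] -> (Quantity, (), {}, spectralcoord_from_redshift)
            #     components[ispec]   -> ("spectral", 0, redshift_from_spectralcoord)
            # so plain redshifts are accepted on input, while conversions that
            # change velocity frame need both observer and target to be set.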
if ctype == "ZOPT":
def spectralcoord_from_redshift(redshift):
if isinstance(redshift, SpectralCoord):
return redshift
return SpectralCoord(
(redshift + 1) * self.wcs.restwav,
unit=u.m,
observer=observer,
target=target,
)
def redshift_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(u.m) / self.wcs.restwav - 1.0
else:
return (
spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(u.m)
/ self.wcs.restwav
- 1.0
)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_redshift)
components[self.wcs.spec] = ("spectral", 0, redshift_from_spectralcoord)
elif ctype == "BETA":
def spectralcoord_from_beta(beta):
if isinstance(beta, SpectralCoord):
return beta
return SpectralCoord(
beta * C_SI,
unit=u.m / u.s,
doppler_convention="relativistic",
doppler_rest=self.wcs.restwav * u.m,
observer=observer,
target=target,
)
def beta_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
doppler_equiv = u.doppler_relativistic(self.wcs.restwav * u.m)
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(u.m / u.s, doppler_equiv) / C_SI
else:
return (
spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(u.m / u.s, doppler_equiv)
/ C_SI
)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_beta)
components[self.wcs.spec] = ("spectral", 0, beta_from_spectralcoord)
else:
kwargs["unit"] = self.wcs.cunit[ispec]
if self.wcs.restfrq > 0:
if ctype == "VELO":
kwargs["doppler_convention"] = "relativistic"
kwargs["doppler_rest"] = self.wcs.restfrq * u.Hz
elif ctype == "VRAD":
kwargs["doppler_convention"] = "radio"
kwargs["doppler_rest"] = self.wcs.restfrq * u.Hz
elif ctype == "VOPT":
kwargs["doppler_convention"] = "optical"
kwargs["doppler_rest"] = self.wcs.restwav * u.m
def spectralcoord_from_value(value):
if isinstance(value, SpectralCoord):
return value
return SpectralCoord(
value, observer=observer, target=target, **kwargs
)
def value_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(**kwargs)
else:
return spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(**kwargs)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_value)
components[self.wcs.spec] = ("spectral", 0, value_from_spectralcoord)
# We can then make sure we correctly return Time objects where appropriate
# (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
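        # For example (illustrative): an axis with CTYPE = 'UTC' and
        # MJDREF = 58000 ends up exposed as seconds of offset from
        # Time(58000.0, format='mjd', scale='utc').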
if "time" in self.world_axis_physical_types:
multiple_time = self.world_axis_physical_types.count("time") > 1
for i in range(self.naxis):
if self.world_axis_physical_types[i] == "time":
if multiple_time:
name = f"time.{i}"
else:
name = "time"
# Initialize delta
reference_time_delta = None
# Extract time scale
scale = self.wcs.ctype[i].lower()
if scale == "time":
if self.wcs.timesys:
scale = self.wcs.timesys.lower()
else:
scale = "utc"
# Drop sub-scales
if "(" in scale:
pos = scale.index("(")
scale, subscale = scale[:pos], scale[pos + 1 : -1]
warnings.warn(
"Dropping unsupported sub-scale "
f"{subscale.upper()} from scale {scale.upper()}",
UserWarning,
)
# TODO: consider having GPS as a scale in Time
# For now GPS is not a scale, we approximate this by TAI - 19s
if scale == "gps":
reference_time_delta = TimeDelta(19, format="sec")
scale = "tai"
elif scale.upper() in FITS_DEPRECATED_SCALES:
scale = FITS_DEPRECATED_SCALES[scale.upper()]
elif scale not in Time.SCALES:
raise ValueError(f"Unrecognized time CTYPE={self.wcs.ctype[i]}")
# Determine location
trefpos = self.wcs.trefpos.lower()
if trefpos.startswith("topocent"):
# Note that some headers use TOPOCENT instead of TOPOCENTER
if np.any(np.isnan(self.wcs.obsgeo[:3])):
warnings.warn(
"Missing or incomplete observer location "
"information, setting location in Time to None",
UserWarning,
)
location = None
else:
location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
elif trefpos == "geocenter":
location = EarthLocation(0, 0, 0, unit=u.m)
elif trefpos == "":
location = None
else:
# TODO: implement support for more locations when Time supports it
warnings.warn(
f"Observation location '{trefpos}' is not "
"supported, setting location in Time to None",
UserWarning,
)
location = None
reference_time = Time(
np.nan_to_num(self.wcs.mjdref[0]),
np.nan_to_num(self.wcs.mjdref[1]),
format="mjd",
scale=scale,
location=location,
)
if reference_time_delta is not None:
reference_time = reference_time + reference_time_delta
def time_from_reference_and_offset(offset):
if isinstance(offset, Time):
return offset
return reference_time + TimeDelta(offset, format="sec")
def offset_from_time_and_reference(time):
return (time - reference_time).sec
classes[name] = (Time, (), {}, time_from_reference_and_offset)
components[i] = (name, 0, offset_from_time_and_reference)
# Fallback: for any remaining components that haven't been identified, just
# return Quantity as the class to use
for i in range(self.naxis):
if components[i] is None:
name = self.wcs.ctype[i].split("-")[0].lower()
if name == "":
name = "world"
while name in classes:
name += "_"
classes[name] = (u.Quantity, (), {"unit": self.wcs.cunit[i]})
components[i] = (name, 0, "value")
# Keep a cached version of result
self._components_and_classes_cache = wcs_hash, (components, classes)
return components, classes
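# A minimal usage sketch (illustrative, not part of the original module): the
# components and classes computed above are what let the high-level API
# reassemble rich objects from raw world values, e.g.
#
#     from astropy.wcs import WCS
#     w = WCS(naxis=2)
#     w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#     w.pixel_to_world(10, 20)   # -> SkyCoord, built via world_axis_object_classes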
|
a83be5d734a205df3e02af3c2682cac25204014a4d612ecbaf7cc029d9636615 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def test_wtbarr_i(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].i == 1
def test_wtbarr_m(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].m == 1
def test_wtbarr_kind(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].kind == "c"
def test_wtbarr_extnam(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].extnam == "WCS-TABLE"
def test_wtbarr_extver(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].extver == 1
def test_wtbarr_extlev(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].extlev == 1
def test_wtbarr_ttype(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].ttype == "wavelength"
def test_wtbarr_row(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].row == 1
def test_wtbarr_ndim(tab_wcs_2di):
assert tab_wcs_2di.wcs.wtb[0].ndim == 3
def test_wtbarr_print(tab_wcs_2di, capfd):
tab_wcs_2di.wcs.wtb[0].print_contents()
captured = capfd.readouterr()
s = str(tab_wcs_2di.wcs.wtb[0])
lines = s.split("\n")
assert captured.out == s
assert lines[0] == " i: 1"
assert lines[1] == " m: 1"
assert lines[2] == " kind: c"
assert lines[3] == "extnam: WCS-TABLE"
assert lines[4] == "extver: 1"
assert lines[5] == "extlev: 1"
assert lines[6] == " ttype: wavelength"
assert lines[7] == " row: 1"
assert lines[8] == " ndim: 3"
assert lines[9].startswith("dimlen: ")
assert lines[10] == " 0: 4"
assert lines[11] == " 1: 2"
assert lines[12].startswith("arrayp: ")
|
dcea52b4b2d54edbba1c9a764c8bb46f393bb23f4159e1d3d48b6cedec2a3898 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import locale
import re
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from packaging.version import Version
from astropy import units as u
from astropy.io import fits
from astropy.units.core import UnitsWarning
from astropy.utils.data import (
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_fileobj,
)
from astropy.utils.misc import _set_locale
from astropy.wcs import _wcs, wcs
from astropy.wcs.wcs import FITSFixedWarning
######################################################################
def test_alt():
w = _wcs.Wcsprm()
assert w.alt == " "
w.alt = "X"
assert w.alt == "X"
del w.alt
assert w.alt == " "
def test_alt_invalid1():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.alt = "$"
def test_alt_invalid2():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.alt = " "
def test_axis_types():
w = _wcs.Wcsprm()
assert_array_equal(w.axis_types, [0, 0])
def test_cd():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.cd.dtype == float
assert w.has_cd() is True
assert_array_equal(w.cd, [[1, 0], [0, 1]])
del w.cd
assert w.has_cd() is False
def test_cd_missing():
w = _wcs.Wcsprm()
assert w.has_cd() is False
with pytest.raises(AttributeError):
w.cd
def test_cd_missing2():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.has_cd() is True
del w.cd
assert w.has_cd() is False
with pytest.raises(AttributeError):
w.cd
def test_cd_invalid():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.cd = [1, 0, 0, 1]
def test_cdfix():
w = _wcs.Wcsprm()
w.cdfix()
def test_cdelt():
w = _wcs.Wcsprm()
assert_array_equal(w.cdelt, [1, 1])
w.cdelt = [42, 54]
assert_array_equal(w.cdelt, [42, 54])
def test_cdelt_delete():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
del w.cdelt
def test_cel_offset():
w = _wcs.Wcsprm()
assert w.cel_offset is False
w.cel_offset = "foo"
assert w.cel_offset is True
w.cel_offset = 0
assert w.cel_offset is False
def test_celfix():
# TODO: We need some data with -NCP or -GLS projections to test
# with. For now, this is just a smoke test
w = _wcs.Wcsprm()
assert w.celfix() == -1
def test_cname():
w = _wcs.Wcsprm()
# Test that this works as an iterator
for x in w.cname:
assert x == ""
assert list(w.cname) == ["", ""]
w.cname = [b"foo", "bar"]
assert list(w.cname) == ["foo", "bar"]
def test_cname_invalid():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
w.cname = [42, 54]
def test_colax():
w = _wcs.Wcsprm()
assert w.colax.dtype == np.intc
assert_array_equal(w.colax, [0, 0])
w.colax = [42, 54]
assert_array_equal(w.colax, [42, 54])
w.colax[0] = 0
assert_array_equal(w.colax, [0, 54])
with pytest.raises(ValueError):
w.colax = [1, 2, 3]
def test_colnum():
w = _wcs.Wcsprm()
assert w.colnum == 0
w.colnum = 42
assert w.colnum == 42
with pytest.raises(OverflowError):
w.colnum = 0xFFFFFFFFFFFFFFFFFFFF
with pytest.raises(OverflowError):
w.colnum = 0xFFFFFFFF
with pytest.raises(TypeError):
del w.colnum
def test_colnum_invalid():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
w.colnum = "foo"
def test_crder():
w = _wcs.Wcsprm()
assert w.crder.dtype == float
assert np.all(np.isnan(w.crder))
w.crder[0] = 0
assert np.isnan(w.crder[1])
assert w.crder[0] == 0
w.crder = w.crder
def test_crota():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.crota.dtype == float
assert w.has_crota() is True
assert_array_equal(w.crota, [1, 0])
del w.crota
assert w.has_crota() is False
def test_crota_missing():
w = _wcs.Wcsprm()
assert w.has_crota() is False
with pytest.raises(AttributeError):
w.crota
def test_crota_missing2():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.has_crota() is True
del w.crota
assert w.has_crota() is False
with pytest.raises(AttributeError):
w.crota
def test_crpix():
w = _wcs.Wcsprm()
assert w.crpix.dtype == float
assert_array_equal(w.crpix, [0, 0])
w.crpix = [42, 54]
assert_array_equal(w.crpix, [42, 54])
w.crpix[0] = 0
assert_array_equal(w.crpix, [0, 54])
with pytest.raises(ValueError):
w.crpix = [1, 2, 3]
def test_crval():
w = _wcs.Wcsprm()
assert w.crval.dtype == float
assert_array_equal(w.crval, [0, 0])
w.crval = [42, 54]
assert_array_equal(w.crval, [42, 54])
w.crval[0] = 0
assert_array_equal(w.crval, [0, 54])
def test_csyer():
w = _wcs.Wcsprm()
assert w.csyer.dtype == float
assert np.all(np.isnan(w.csyer))
w.csyer[0] = 0
assert np.isnan(w.csyer[1])
assert w.csyer[0] == 0
w.csyer = w.csyer
def test_ctype():
w = _wcs.Wcsprm()
assert list(w.ctype) == ["", ""]
w.ctype = [b"RA---TAN", "DEC--TAN"]
assert_array_equal(w.axis_types, [2200, 2201])
assert w.lat == 1
assert w.lng == 0
assert w.lattyp == "DEC"
assert w.lngtyp == "RA"
assert list(w.ctype) == ["RA---TAN", "DEC--TAN"]
w.ctype = ["foo", "bar"]
assert_array_equal(w.axis_types, [0, 0])
assert list(w.ctype) == ["foo", "bar"]
assert w.lat == -1
assert w.lng == -1
assert w.lattyp == "DEC"
assert w.lngtyp == "RA"
def test_ctype_repr():
w = _wcs.Wcsprm()
assert list(w.ctype) == ["", ""]
w.ctype = [b"RA-\t--TAN", "DEC-\n-TAN"]
    # The original assertion wrapped the whole comparison inside repr(), which
    # made it vacuously true; compare the round-tripped values directly instead.
    assert list(w.ctype) == ["RA-\t--TAN", "DEC-\n-TAN"]
def test_ctype_index_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ["", ""]
for idx in (2, -3):
with pytest.raises(IndexError):
w.ctype[idx]
with pytest.raises(IndexError):
w.ctype[idx] = "FOO"
def test_ctype_invalid_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ["", ""]
with pytest.raises(ValueError):
w.ctype[0] = "X" * 100
with pytest.raises(TypeError):
w.ctype[0] = True
with pytest.raises(TypeError):
w.ctype = ["a", 0]
with pytest.raises(TypeError):
w.ctype = None
with pytest.raises(ValueError):
w.ctype = ["a", "b", "c"]
with pytest.raises(ValueError):
w.ctype = ["FOO", "A" * 100]
def test_cubeface():
w = _wcs.Wcsprm()
assert w.cubeface == -1
w.cubeface = 0
with pytest.raises(OverflowError):
w.cubeface = -1
def test_cunit():
w = _wcs.Wcsprm()
assert list(w.cunit) == [u.Unit(""), u.Unit("")]
w.cunit = [u.m, "km"]
assert w.cunit[0] == u.m
assert w.cunit[1] == u.km
def test_cunit_invalid():
w = _wcs.Wcsprm()
with pytest.warns(u.UnitsWarning, match="foo") as warns:
w.cunit[0] = "foo"
assert len(warns) == 1
def test_cunit_invalid2():
w = _wcs.Wcsprm()
with pytest.warns(u.UnitsWarning) as warns:
w.cunit = ["foo", "bar"]
assert len(warns) == 2
assert "foo" in str(warns[0].message)
assert "bar" in str(warns[1].message)
def test_unit():
w = wcs.WCS()
w.wcs.cunit[0] = u.erg
assert w.wcs.cunit[0] == u.erg
assert repr(w.wcs.cunit) == "['erg', '']"
def test_unit2():
w = wcs.WCS()
with pytest.warns(UnitsWarning):
myunit = u.Unit("FOOBAR", parse_strict="warn")
w.wcs.cunit[0] = myunit
def test_unit3():
w = wcs.WCS()
for idx in (2, -3):
with pytest.raises(IndexError):
w.wcs.cunit[idx]
with pytest.raises(IndexError):
w.wcs.cunit[idx] = u.m
with pytest.raises(ValueError):
w.wcs.cunit = [u.m, u.m, u.m]
def test_unitfix():
w = _wcs.Wcsprm()
w.unitfix()
def test_cylfix():
# TODO: We need some data with broken cylindrical projections to
# test with. For now, this is just a smoke test.
w = _wcs.Wcsprm()
assert w.cylfix() == -1
assert w.cylfix([0, 1]) == -1
with pytest.raises(ValueError):
w.cylfix([0, 1, 2])
def test_dateavg():
w = _wcs.Wcsprm()
assert w.dateavg == ""
# TODO: When dateavg is verified, check that it works
def test_dateobs():
w = _wcs.Wcsprm()
assert w.dateobs == ""
# TODO: When dateavg is verified, check that it works
def test_datfix():
w = _wcs.Wcsprm()
w.dateobs = "31/12/99"
assert w.datfix() == 0
assert w.dateobs == "1999-12-31"
assert w.mjdobs == 51543.0
def test_equinox():
w = _wcs.Wcsprm()
assert np.isnan(w.equinox)
w.equinox = 0
assert w.equinox == 0
del w.equinox
assert np.isnan(w.equinox)
with pytest.raises(TypeError):
w.equinox = None
def test_fix():
w = _wcs.Wcsprm()
fix_ref = {
"cdfix": "No change",
"cylfix": "No change",
"obsfix": "No change",
"datfix": "No change",
"spcfix": "No change",
"unitfix": "No change",
"celfix": "No change",
}
version = wcs._wcs.__version__
if Version(version) <= Version("5"):
del fix_ref["obsfix"]
if Version(version) >= Version("7.1"):
w.dateref = "1858-11-17"
if Version("7.4") <= Version(version) < Version("7.6"):
fix_ref["datfix"] = "Success"
assert w.fix() == fix_ref
def test_fix2():
w = _wcs.Wcsprm()
w.dateobs = "31/12/99"
fix_ref = {
"cdfix": "No change",
"cylfix": "No change",
"obsfix": "No change",
"datfix": (
"Set MJD-OBS to 51543.000000 from DATE-OBS.\n"
"Changed DATE-OBS from '31/12/99' to '1999-12-31'"
),
"spcfix": "No change",
"unitfix": "No change",
"celfix": "No change",
}
version = wcs._wcs.__version__
if Version(version) <= Version("5"):
del fix_ref["obsfix"]
fix_ref["datfix"] = "Changed '31/12/99' to '1999-12-31'"
if Version(version) >= Version("7.3"):
fix_ref["datfix"] = (
"Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref["datfix"]
)
elif Version(version) >= Version("7.1"):
fix_ref["datfix"] = (
"Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref["datfix"]
)
assert w.fix() == fix_ref
assert w.dateobs == "1999-12-31"
assert w.mjdobs == 51543.0
def test_fix3():
w = _wcs.Wcsprm()
w.dateobs = "31/12/F9"
fix_ref = {
"cdfix": "No change",
"cylfix": "No change",
"obsfix": "No change",
"datfix": "Invalid DATE-OBS format '31/12/F9'",
"spcfix": "No change",
"unitfix": "No change",
"celfix": "No change",
}
version = wcs._wcs.__version__
if Version(version) <= Version("5"):
del fix_ref["obsfix"]
fix_ref["datfix"] = "Invalid parameter value: invalid date '31/12/F9'"
if Version(version) >= Version("7.3"):
fix_ref["datfix"] = (
"Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref["datfix"]
)
elif Version(version) >= Version("7.1"):
fix_ref["datfix"] = (
"Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref["datfix"]
)
assert w.fix() == fix_ref
assert w.dateobs == "31/12/F9"
assert np.isnan(w.mjdobs)
def test_fix4():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.fix("X")
def test_fix5():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.fix(naxis=[0, 1, 2])
def test_get_ps():
# TODO: We need some data with PSi_ma keywords
w = _wcs.Wcsprm()
assert len(w.get_ps()) == 0
def test_get_pv():
# TODO: We need some data with PVi_ma keywords
w = _wcs.Wcsprm()
assert len(w.get_pv()) == 0
def test_imgpix_matrix():
w = _wcs.Wcsprm()
with pytest.raises(AssertionError):
w.imgpix_matrix
def test_imgpix_matrix2():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.imgpix_matrix = None
def test_isunity():
w = _wcs.Wcsprm()
assert w.is_unity()
def test_lat():
w = _wcs.Wcsprm()
assert w.lat == -1
def test_lat_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lat = 0
def test_latpole():
w = _wcs.Wcsprm()
assert w.latpole == 90.0
w.latpole = 45.0
assert w.latpole == 45.0
del w.latpole
assert w.latpole == 90.0
def test_lattyp():
w = _wcs.Wcsprm()
assert w.lattyp == " "
def test_lattyp_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lattyp = 0
def test_lng():
w = _wcs.Wcsprm()
assert w.lng == -1
def test_lng_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lng = 0
def test_lngtyp():
w = _wcs.Wcsprm()
assert w.lngtyp == " "
def test_lngtyp_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lngtyp = 0
def test_lonpole():
w = _wcs.Wcsprm()
assert np.isnan(w.lonpole)
w.lonpole = 45.0
assert w.lonpole == 45.0
del w.lonpole
assert np.isnan(w.lonpole)
def test_mix():
w = _wcs.Wcsprm()
w.ctype = [b"RA---TAN", "DEC--TAN"]
with pytest.raises(_wcs.InvalidCoordinateError):
w.mix(1, 1, [240, 480], 1, 5, [0, 2], [54, 32], 1)
def test_mjdavg():
w = _wcs.Wcsprm()
assert np.isnan(w.mjdavg)
w.mjdavg = 45.0
assert w.mjdavg == 45.0
del w.mjdavg
assert np.isnan(w.mjdavg)
def test_mjdobs():
w = _wcs.Wcsprm()
assert np.isnan(w.mjdobs)
w.mjdobs = 45.0
assert w.mjdobs == 45.0
del w.mjdobs
assert np.isnan(w.mjdobs)
def test_name():
w = _wcs.Wcsprm()
assert w.name == ""
w.name = "foo"
assert w.name == "foo"
def test_naxis():
w = _wcs.Wcsprm()
assert w.naxis == 2
def test_naxis_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.naxis = 4
def test_obsgeo():
w = _wcs.Wcsprm()
assert np.all(np.isnan(w.obsgeo))
w.obsgeo = [1, 2, 3, 4, 5, 6]
assert_array_equal(w.obsgeo, [1, 2, 3, 4, 5, 6])
del w.obsgeo
assert np.all(np.isnan(w.obsgeo))
def test_pc():
w = _wcs.Wcsprm()
assert w.has_pc()
assert_array_equal(w.pc, [[1, 0], [0, 1]])
w.cd = [[1, 0], [0, 1]]
assert not w.has_pc()
del w.cd
assert w.has_pc()
assert_array_equal(w.pc, [[1, 0], [0, 1]])
w.pc = w.pc
def test_pc_missing():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert not w.has_pc()
with pytest.raises(AttributeError):
w.pc
def test_phi0():
w = _wcs.Wcsprm()
assert np.isnan(w.phi0)
w.phi0 = 42.0
assert w.phi0 == 42.0
del w.phi0
assert np.isnan(w.phi0)
def test_piximg_matrix():
w = _wcs.Wcsprm()
with pytest.raises(AssertionError):
w.piximg_matrix
def test_piximg_matrix2():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.piximg_matrix = None
def test_print_contents():
# In general, this is human-consumable, so we don't care if the
# content changes, just check the type
w = _wcs.Wcsprm()
assert isinstance(str(w), str)
def test_radesys():
w = _wcs.Wcsprm()
assert w.radesys == ""
w.radesys = "foo"
assert w.radesys == "foo"
def test_restfrq():
w = _wcs.Wcsprm()
assert w.restfrq == 0.0
w.restfrq = np.nan
assert np.isnan(w.restfrq)
del w.restfrq
def test_restwav():
w = _wcs.Wcsprm()
assert w.restwav == 0.0
w.restwav = np.nan
assert np.isnan(w.restwav)
del w.restwav
def test_set_ps():
w = _wcs.Wcsprm()
data = [(0, 0, "param1"), (1, 1, "param2")]
w.set_ps(data)
assert w.get_ps() == data
def test_set_ps_realloc():
w = _wcs.Wcsprm()
w.set_ps([(0, 0, "param1")] * 16)
def test_set_pv():
w = _wcs.Wcsprm()
data = [(0, 0, 42.0), (1, 1, 54.0)]
w.set_pv(data)
assert w.get_pv() == data
def test_set_pv_realloc():
w = _wcs.Wcsprm()
w.set_pv([(0, 0, 42.0)] * 16)
def test_spcfix():
# TODO: We need some data with broken spectral headers here to
# really test
header = get_pkg_data_contents("data/spectra/orion-velo-1.hdr", encoding="binary")
w = _wcs.Wcsprm(header)
assert w.spcfix() == -1
def test_spec():
w = _wcs.Wcsprm()
assert w.spec == -1
def test_spec_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.spec = 0
def test_specsys():
w = _wcs.Wcsprm()
assert w.specsys == ""
w.specsys = "foo"
assert w.specsys == "foo"
def test_sptr():
# TODO: Write me
pass
def test_ssysobs():
w = _wcs.Wcsprm()
assert w.ssysobs == ""
w.ssysobs = "foo"
assert w.ssysobs == "foo"
def test_ssyssrc():
w = _wcs.Wcsprm()
assert w.ssyssrc == ""
w.ssyssrc = "foo"
assert w.ssyssrc == "foo"
def test_tab():
w = _wcs.Wcsprm()
assert len(w.tab) == 0
# TODO: Inject some headers that have tables and test
def test_theta0():
w = _wcs.Wcsprm()
assert np.isnan(w.theta0)
w.theta0 = 42.0
assert w.theta0 == 42.0
del w.theta0
assert np.isnan(w.theta0)
def test_toheader():
w = _wcs.Wcsprm()
assert isinstance(w.to_header(), str)
def test_velangl():
w = _wcs.Wcsprm()
assert np.isnan(w.velangl)
w.velangl = 42.0
assert w.velangl == 42.0
del w.velangl
assert np.isnan(w.velangl)
def test_velosys():
w = _wcs.Wcsprm()
assert np.isnan(w.velosys)
w.velosys = 42.0
assert w.velosys == 42.0
del w.velosys
assert np.isnan(w.velosys)
def test_velref():
w = _wcs.Wcsprm()
assert w.velref == 0.0
w.velref = 42
assert w.velref == 42.0
del w.velref
assert w.velref == 0.0
def test_zsource():
w = _wcs.Wcsprm()
assert np.isnan(w.zsource)
w.zsource = 42.0
assert w.zsource == 42.0
del w.zsource
assert np.isnan(w.zsource)
def test_cd_3d():
header = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
w = _wcs.Wcsprm(header)
assert w.cd.shape == (3, 3)
assert w.get_pc().shape == (3, 3)
assert w.get_cdelt().shape == (3,)
def test_get_pc():
header = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
w = _wcs.Wcsprm(header)
pc = w.get_pc()
try:
pc[0, 0] = 42
except (RuntimeError, ValueError):
pass
else:
raise AssertionError()
def test_detailed_err():
w = _wcs.Wcsprm()
w.pc = [[0, 0], [0, 0]]
with pytest.raises(_wcs.SingularMatrixError):
w.set()
def test_header_parse():
from astropy.io import fits
with get_pkg_data_fileobj(
"data/header_newlines.fits", encoding="binary"
) as test_file:
hdulist = fits.open(test_file)
with pytest.warns(FITSFixedWarning):
w = wcs.WCS(hdulist[0].header)
assert w.wcs.ctype[0] == "RA---TAN-SIP"
def test_locale():
try:
with _set_locale("fr_FR"):
header = get_pkg_data_contents("data/locale.hdr", encoding="binary")
with pytest.warns(FITSFixedWarning):
w = _wcs.Wcsprm(header)
assert re.search("[0-9]+,[0-9]*", w.to_header()) is None
except locale.Error:
pytest.xfail(
"Can't set to 'fr_FR' locale, perhaps because it is not installed "
"on this system"
)
def test_unicode():
w = _wcs.Wcsprm()
with pytest.raises(UnicodeEncodeError):
w.alt = "‰"
def test_sub_segfault():
"""Issue #1960"""
header = fits.Header.fromtextfile(get_pkg_data_filename("data/sub-segfault.hdr"))
w = wcs.WCS(header)
w.sub([wcs.WCSSUB_CELESTIAL])
gc.collect()
def test_bounds_check():
w = _wcs.Wcsprm()
w.bounds_check(False)
def test_wcs_sub_error_message():
"""Issue #1587"""
w = _wcs.Wcsprm()
with pytest.raises(TypeError, match="axes must None, a sequence or an integer$"):
w.sub("latitude")
def test_wcs_sub():
"""Issue #3356"""
w = _wcs.Wcsprm()
w.sub(["latitude"])
w = _wcs.Wcsprm()
w.sub([b"latitude"])
def test_compare():
header = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
w = _wcs.Wcsprm(header)
w2 = _wcs.Wcsprm(header)
assert w == w2
w.equinox = 42
assert w == w2
assert not w.compare(w2)
assert w.compare(w2, _wcs.WCSCOMPARE_ANCILLARY)
w = _wcs.Wcsprm(header)
w2 = _wcs.Wcsprm(header)
with pytest.warns(RuntimeWarning):
w.cdelt[0] = np.float32(0.00416666666666666666666666)
w2.cdelt[0] = np.float64(0.00416666666666666666666666)
assert not w.compare(w2)
assert w.compare(w2, tolerance=1e-6)
def test_radesys_defaults():
w = _wcs.Wcsprm()
w.ctype = ["RA---TAN", "DEC--TAN"]
w.set()
assert w.radesys == "ICRS"
def test_radesys_defaults_full():
# As described in Section 3.1 of the FITS standard "Equatorial and ecliptic
# coordinates", for those systems the RADESYS keyword can be used to
# indicate the equatorial/ecliptic frame to use. From the standard:
# "For RADESYSa values of FK4 and FK4-NO-E, any stated equinox is Besselian
# and, if neither EQUINOXa nor EPOCH are given, a default of 1950.0 is to
# be taken. For FK5, any stated equinox is Julian and, if neither keyword
# is given, it defaults to 2000.0.
# "If the EQUINOXa keyword is given it should always be accompanied by
    # RADESYSa. However, if it should happen to appear by itself then
    # RADESYSa defaults to FK4 if EQUINOXa < 1984.0, or to FK5 if EQUINOXa
    # >= 1984.0. Note that these defaults, while probably true of older files
# using the EPOCH keyword, are not required of them.
# By default RADESYS is empty
w = _wcs.Wcsprm(naxis=2)
assert w.radesys == ""
assert np.isnan(w.equinox)
# For non-ecliptic or equatorial systems it is still empty
w = _wcs.Wcsprm(naxis=2)
for ctype in [("GLON-CAR", "GLAT-CAR"), ("SLON-SIN", "SLAT-SIN")]:
w.ctype = ctype
w.set()
assert w.radesys == ""
assert np.isnan(w.equinox)
for ctype in [
("RA---TAN", "DEC--TAN"),
("ELON-TAN", "ELAT-TAN"),
("DEC--TAN", "RA---TAN"),
("ELAT-TAN", "ELON-TAN"),
]:
# Check defaults for RADESYS
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.set()
assert w.radesys == "ICRS"
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.equinox = 1980
w.set()
assert w.radesys == "FK4"
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.equinox = 1984
w.set()
assert w.radesys == "FK5"
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = "foo"
w.set()
assert w.radesys == "foo"
# Check defaults for EQUINOX
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.set()
assert np.isnan(w.equinox) # frame is ICRS, no equinox
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = "ICRS"
w.set()
assert np.isnan(w.equinox)
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = "FK5"
w.set()
assert w.equinox == 2000.0
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = "FK4"
w.set()
assert w.equinox == 1950
w = _wcs.Wcsprm(naxis=2)
w.ctype = ctype
w.radesys = "FK4-NO-E"
w.set()
assert w.equinox == 1950
def test_iteration():
world = np.array(
[
[-0.58995335, -0.5],
[0.00664326, -0.5],
[-0.58995335, -0.25],
[0.00664326, -0.25],
[-0.58995335, 0.0],
[0.00664326, 0.0],
[-0.58995335, 0.25],
[0.00664326, 0.25],
[-0.58995335, 0.5],
[0.00664326, 0.5],
],
float,
)
w = wcs.WCS()
w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
w.wcs.cdelt = [-0.006666666828, 0.006666666828]
w.wcs.crpix = [75.907, 74.8485]
x = w.wcs_world2pix(world, 1)
expected = np.array(
[
[1.64400000e02, -1.51498185e-01],
[7.49105110e01, -1.51498185e-01],
[1.64400000e02, 3.73485009e01],
[7.49105110e01, 3.73485009e01],
[1.64400000e02, 7.48485000e01],
[7.49105110e01, 7.48485000e01],
[1.64400000e02, 1.12348499e02],
[7.49105110e01, 1.12348499e02],
[1.64400000e02, 1.49848498e02],
[7.49105110e01, 1.49848498e02],
],
float,
)
assert_array_almost_equal(x, expected)
w2 = w.wcs_pix2world(x, 1)
world[:, 0] %= 360.0
assert_array_almost_equal(w2, world)
def test_invalid_args():
with pytest.raises(TypeError):
_wcs.Wcsprm(keysel="A")
with pytest.raises(ValueError):
_wcs.Wcsprm(keysel=2)
with pytest.raises(ValueError):
_wcs.Wcsprm(colsel=2)
with pytest.raises(ValueError):
_wcs.Wcsprm(naxis=64)
header = get_pkg_data_contents("data/spectra/orion-velo-1.hdr", encoding="binary")
with pytest.raises(ValueError):
_wcs.Wcsprm(header, relax="FOO")
with pytest.raises(ValueError):
_wcs.Wcsprm(header, naxis=3)
with pytest.raises(KeyError):
_wcs.Wcsprm(header, key="A")
# Test keywords in the Time standard
def test_datebeg():
w = _wcs.Wcsprm()
assert w.datebeg == ""
w.datebeg = "2001-02-11"
assert w.datebeg == "2001-02-11"
w.datebeg = "31/12/99"
fix_ref = {
"cdfix": "No change",
"cylfix": "No change",
"obsfix": "No change",
"datfix": "Invalid DATE-BEG format '31/12/99'",
"spcfix": "No change",
"unitfix": "No change",
"celfix": "No change",
}
if Version(wcs._wcs.__version__) >= Version("7.3"):
fix_ref["datfix"] = (
"Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref["datfix"]
)
elif Version(wcs._wcs.__version__) >= Version("7.1"):
fix_ref["datfix"] = (
"Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref["datfix"]
)
assert w.fix() == fix_ref
char_keys = [
"timesys",
"trefpos",
"trefdir",
"plephem",
"timeunit",
"dateref",
"dateavg",
"dateend",
]
@pytest.mark.parametrize("key", char_keys)
def test_char_keys(key):
w = _wcs.Wcsprm()
assert getattr(w, key) == ""
setattr(w, key, "foo")
assert getattr(w, key) == "foo"
with pytest.raises(TypeError):
setattr(w, key, 42)
num_keys = [
"mjdobs",
"mjdbeg",
"mjdend",
"jepoch",
"bepoch",
"tstart",
"tstop",
"xposure",
"timsyer",
"timrder",
"timedel",
"timepixr",
"timeoffs",
"telapse",
"xposure",
]
@pytest.mark.parametrize("key", num_keys)
def test_num_keys(key):
w = _wcs.Wcsprm()
assert np.isnan(getattr(w, key))
setattr(w, key, 42.0)
assert getattr(w, key) == 42.0
delattr(w, key)
assert np.isnan(getattr(w, key))
with pytest.raises(TypeError):
setattr(w, key, "foo")
@pytest.mark.parametrize("key", ["czphs", "cperi", "mjdref"])
def test_array_keys(key):
w = _wcs.Wcsprm()
attr = getattr(w, key)
if key == "mjdref" and Version(_wcs.__version__) >= Version("7.1"):
assert np.allclose(attr, [0, 0])
else:
assert np.all(np.isnan(attr))
assert attr.dtype == float
setattr(w, key, [1.0, 2.0])
assert_array_equal(getattr(w, key), [1.0, 2.0])
with pytest.raises(ValueError):
setattr(w, key, ["foo", "bar"])
with pytest.raises(ValueError):
setattr(w, key, "foo")
|
2a29c5a328db99846e77746e4cc63e20905e597b65c50ee09d7103f27ffd0d96 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.io import fits
class SimModelTAB:
def __init__(
self,
nx=150,
ny=200,
crpix=[1, 1],
crval=[1, 1],
cdelt=[1, 1],
pc={"PC1_1": 1, "PC2_2": 1},
):
# set essential parameters of the model (coord transformations):
assert nx > 2 and ny > 1 # a limitation of this particular simulation
self.nx = nx
self.ny = ny
self.crpix = crpix
self.crval = crval
self.cdelt = cdelt
self.pc = pc
def fwd_eval(self, xy):
xb = 1 + self.nx // 3
px = np.array([1, xb, xb, self.nx + 1])
py = np.array([1, self.ny + 1])
xi = self.crval[0] + self.cdelt[0] * (px - self.crpix[0])
yi = self.crval[1] + self.cdelt[1] * (py - self.crpix[1])
cx = np.array([0.0, 0.26, 0.8, 1.0])
cy = np.array([-0.5, 0.5])
xy = np.atleast_2d(xy)
x = xy[:, 0]
y = xy[:, 1]
mbad = (x < px[0]) | (y < py[0]) | (x > px[-1]) | (y > py[-1])
mgood = np.logical_not(mbad)
i = 2 * (x > xb).astype(int)
psix = self.crval[0] + self.cdelt[0] * (x - self.crpix[0])
psiy = self.crval[1] + self.cdelt[1] * (y - self.crpix[1])
cfx = (psix - xi[i]) / (xi[i + 1] - xi[i])
cfy = (psiy - yi[0]) / (yi[1] - yi[0])
ra = cx[i] + cfx * (cx[i + 1] - cx[i])
dec = cy[0] + cfy * (cy[1] - cy[0])
return np.dstack([ra, dec])[0]
@property
def hdulist(self):
"""Simulates 2D data with a _spatial_ WCS that uses the ``-TAB``
algorithm with indexing.
"""
# coordinate array (some "arbitrary" numbers with a "jump" along x axis):
x = np.array([[0.0, 0.26, 0.8, 1.0], [0.0, 0.26, 0.8, 1.0]])
y = np.array([[-0.5, -0.5, -0.5, -0.5], [0.5, 0.5, 0.5, 0.5]])
c = np.dstack([x, y])
# index arrays (skip PC matrix for simplicity - assume it is an
# identity matrix):
xb = 1 + self.nx // 3
px = np.array([1, xb, xb, self.nx + 1])
py = np.array([1, self.ny + 1])
xi = self.crval[0] + self.cdelt[0] * (px - self.crpix[0])
yi = self.crval[1] + self.cdelt[1] * (py - self.crpix[1])
# structured array (data) for binary table HDU:
arr = np.array(
[(c, xi, yi)],
dtype=[
("wavelength", np.float64, c.shape),
("xi", np.double, (xi.size,)),
("yi", np.double, (yi.size,)),
],
)
# create binary table HDU:
bt = fits.BinTableHDU(arr)
bt.header["EXTNAME"] = "WCS-TABLE"
# create primary header:
image_data = np.ones((self.ny, self.nx), dtype=np.float32)
pu = fits.PrimaryHDU(image_data)
pu.header["ctype1"] = "RA---TAB"
pu.header["ctype2"] = "DEC--TAB"
pu.header["naxis1"] = self.nx
pu.header["naxis2"] = self.ny
pu.header["PS1_0"] = "WCS-TABLE"
pu.header["PS2_0"] = "WCS-TABLE"
pu.header["PS1_1"] = "wavelength"
pu.header["PS2_1"] = "wavelength"
pu.header["PV1_3"] = 1
pu.header["PV2_3"] = 2
pu.header["CUNIT1"] = "deg"
pu.header["CUNIT2"] = "deg"
pu.header["CDELT1"] = self.cdelt[0]
pu.header["CDELT2"] = self.cdelt[1]
pu.header["CRPIX1"] = self.crpix[0]
pu.header["CRPIX2"] = self.crpix[1]
pu.header["CRVAL1"] = self.crval[0]
pu.header["CRVAL2"] = self.crval[1]
pu.header["PS1_2"] = "xi"
pu.header["PS2_2"] = "yi"
for k, v in self.pc.items():
pu.header[k] = v
hdulist = fits.HDUList([pu, bt])
return hdulist
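# A minimal usage sketch (illustrative, not part of the original module):
#
#     from astropy.wcs import WCS
#     model = SimModelTAB(nx=150, ny=200)
#     hdulist = model.hdulist
#     w = WCS(hdulist[0].header, hdulist)  # the HDUList resolves the -TAB lookup table
#     # w.wcs_pix2world(...) should agree with model.fwd_eval(...) for in-range pixels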
|
48a38d36dec6d244ac4789737eb24b01cb25499a87f93150b04e40322339d956 | import numbers
from collections import defaultdict
import numpy as np
from astropy.utils import isiterable
from astropy.utils.decorators import lazyproperty
from .base import BaseWCSWrapper
__all__ = ["sanitize_slices", "SlicedLowLevelWCS"]
def sanitize_slices(slices, ndim):
"""
    Given a slice as input, sanitise it to an easier to parse format.
This function returns a list ``ndim`` long containing slice objects (or ints).
"""
if not isinstance(slices, (tuple, list)): # We just have a single int
slices = (slices,)
if len(slices) > ndim:
raise ValueError(
f"The dimensionality of the specified slice {slices} can not be greater "
f"than the dimensionality ({ndim}) of the wcs."
)
if any(isiterable(s) for s in slices):
raise IndexError(
"This slice is invalid, only integer or range slices are supported."
)
slices = list(slices)
if Ellipsis in slices:
if slices.count(Ellipsis) > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
# Replace the Ellipsis with the correct number of slice(None)s
e_ind = slices.index(Ellipsis)
slices.remove(Ellipsis)
n_e = ndim - len(slices)
for i in range(n_e):
ind = e_ind + i
slices.insert(ind, slice(None))
for i in range(ndim):
if i < len(slices):
slc = slices[i]
if isinstance(slc, slice):
if slc.step and slc.step != 1:
raise IndexError("Slicing WCS with a step is not supported.")
elif not isinstance(slc, numbers.Integral):
raise IndexError("Only integer or range slices are accepted.")
else:
slices.append(slice(None))
return slices
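# For example (illustrative):
#     sanitize_slices((0, Ellipsis), 4)  -> [0, slice(None), slice(None), slice(None)]
#     sanitize_slices(slice(5, 10), 3)   -> [slice(5, 10), slice(None), slice(None)]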
def combine_slices(slice1, slice2):
"""
Given two slices that can be applied to a 1-d array, find the resulting
slice that corresponds to the combination of both slices. We assume that
slice2 can be an integer, but slice1 cannot.
"""
if isinstance(slice1, slice) and slice1.step is not None:
raise ValueError("Only slices with steps of 1 are supported")
if isinstance(slice2, slice) and slice2.step is not None:
raise ValueError("Only slices with steps of 1 are supported")
if isinstance(slice2, numbers.Integral):
if slice1.start is None:
return slice2
else:
return slice2 + slice1.start
if slice1.start is None:
if slice1.stop is None:
return slice2
else:
if slice2.stop is None:
return slice(slice2.start, slice1.stop)
else:
return slice(slice2.start, min(slice1.stop, slice2.stop))
else:
if slice2.start is None:
start = slice1.start
else:
start = slice1.start + slice2.start
if slice2.stop is None:
stop = slice1.stop
else:
if slice1.start is None:
stop = slice2.stop
else:
stop = slice2.stop + slice1.start
if slice1.stop is not None:
stop = min(slice1.stop, stop)
return slice(start, stop)
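# For example (illustrative):
#     combine_slices(slice(2, 10), slice(1, 5))  -> slice(3, 7)
#     combine_slices(slice(2, None), 3)          -> 5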
class SlicedLowLevelWCS(BaseWCSWrapper):
"""
A Low Level WCS wrapper which applies an array slice to a WCS.
This class does not modify the underlying WCS object and can therefore drop
coupled dimensions as it stores which pixel and world dimensions have been
sliced out (or modified) in the underlying WCS and returns the modified
results on all the Low Level WCS methods.
Parameters
----------
wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS`
The WCS to slice.
slices : `slice` or `tuple` or `int`
A valid array slice to apply to the WCS.
"""
def __init__(self, wcs, slices):
slices = sanitize_slices(slices, wcs.pixel_n_dim)
if isinstance(wcs, SlicedLowLevelWCS):
# Here we combine the current slices with the previous slices
# to avoid ending up with many nested WCSes
self._wcs = wcs._wcs
slices_original = wcs._slices_array.copy()
for ipixel in range(wcs.pixel_n_dim):
ipixel_orig = wcs._wcs.pixel_n_dim - 1 - wcs._pixel_keep[ipixel]
ipixel_new = wcs.pixel_n_dim - 1 - ipixel
slices_original[ipixel_orig] = combine_slices(
slices_original[ipixel_orig], slices[ipixel_new]
)
self._slices_array = slices_original
else:
self._wcs = wcs
self._slices_array = slices
self._slices_pixel = self._slices_array[::-1]
# figure out which pixel dimensions have been kept, then use axis correlation
# matrix to figure out which world dims are kept
self._pixel_keep = np.nonzero(
[
not isinstance(self._slices_pixel[ip], numbers.Integral)
for ip in range(self._wcs.pixel_n_dim)
]
)[0]
# axis_correlation_matrix[world, pixel]
self._world_keep = np.nonzero(
self._wcs.axis_correlation_matrix[:, self._pixel_keep].any(axis=1)
)[0]
if len(self._pixel_keep) == 0 or len(self._world_keep) == 0:
raise ValueError(
"Cannot slice WCS: the resulting WCS should have "
"at least one pixel and one world dimension."
)
@lazyproperty
def dropped_world_dimensions(self):
"""
Information describing the dropped world dimensions.
"""
world_coords = self._pixel_to_world_values_all(*[0] * len(self._pixel_keep))
dropped_info = defaultdict(list)
for i in range(self._wcs.world_n_dim):
if i in self._world_keep:
continue
if "world_axis_object_classes" not in dropped_info:
dropped_info["world_axis_object_classes"] = dict()
wao_classes = self._wcs.world_axis_object_classes
wao_components = self._wcs.world_axis_object_components
dropped_info["value"].append(world_coords[i])
dropped_info["world_axis_names"].append(self._wcs.world_axis_names[i])
dropped_info["world_axis_physical_types"].append(
self._wcs.world_axis_physical_types[i]
)
dropped_info["world_axis_units"].append(self._wcs.world_axis_units[i])
dropped_info["world_axis_object_components"].append(wao_components[i])
dropped_info["world_axis_object_classes"].update(
dict(
filter(lambda x: x[0] == wao_components[i][0], wao_classes.items())
)
)
dropped_info["serialized_classes"] = self.serialized_classes
return dict(dropped_info)
@property
def pixel_n_dim(self):
return len(self._pixel_keep)
@property
def world_n_dim(self):
return len(self._world_keep)
@property
def world_axis_physical_types(self):
return [self._wcs.world_axis_physical_types[i] for i in self._world_keep]
@property
def world_axis_units(self):
return [self._wcs.world_axis_units[i] for i in self._world_keep]
@property
def pixel_axis_names(self):
return [self._wcs.pixel_axis_names[i] for i in self._pixel_keep]
@property
def world_axis_names(self):
return [self._wcs.world_axis_names[i] for i in self._world_keep]
def _pixel_to_world_values_all(self, *pixel_arrays):
pixel_arrays = tuple(map(np.asanyarray, pixel_arrays))
pixel_arrays_new = []
ipix_curr = -1
for ipix in range(self._wcs.pixel_n_dim):
if isinstance(self._slices_pixel[ipix], numbers.Integral):
pixel_arrays_new.append(self._slices_pixel[ipix])
else:
ipix_curr += 1
if self._slices_pixel[ipix].start is not None:
pixel_arrays_new.append(
pixel_arrays[ipix_curr] + self._slices_pixel[ipix].start
)
else:
pixel_arrays_new.append(pixel_arrays[ipix_curr])
pixel_arrays_new = np.broadcast_arrays(*pixel_arrays_new)
return self._wcs.pixel_to_world_values(*pixel_arrays_new)
def pixel_to_world_values(self, *pixel_arrays):
world_arrays = self._pixel_to_world_values_all(*pixel_arrays)
# Detect the case of a length 0 array
if isinstance(world_arrays, np.ndarray) and not world_arrays.shape:
return world_arrays
if self._wcs.world_n_dim > 1:
# Select the dimensions of the original WCS we are keeping.
world_arrays = [world_arrays[iw] for iw in self._world_keep]
# If there is only one world dimension (after slicing) we shouldn't return a tuple.
if self.world_n_dim == 1:
world_arrays = world_arrays[0]
return world_arrays
def world_to_pixel_values(self, *world_arrays):
sliced_out_world_coords = self._pixel_to_world_values_all(
*[0] * len(self._pixel_keep)
)
world_arrays = tuple(map(np.asanyarray, world_arrays))
world_arrays_new = []
iworld_curr = -1
for iworld in range(self._wcs.world_n_dim):
if iworld in self._world_keep:
iworld_curr += 1
world_arrays_new.append(world_arrays[iworld_curr])
else:
world_arrays_new.append(sliced_out_world_coords[iworld])
world_arrays_new = np.broadcast_arrays(*world_arrays_new)
pixel_arrays = list(self._wcs.world_to_pixel_values(*world_arrays_new))
for ipixel in range(self._wcs.pixel_n_dim):
if (
isinstance(self._slices_pixel[ipixel], slice)
and self._slices_pixel[ipixel].start is not None
):
pixel_arrays[ipixel] -= self._slices_pixel[ipixel].start
# Detect the case of a length 0 array
if isinstance(pixel_arrays, np.ndarray) and not pixel_arrays.shape:
return pixel_arrays
pixel = tuple(pixel_arrays[ip] for ip in self._pixel_keep)
if self.pixel_n_dim == 1 and self._wcs.pixel_n_dim > 1:
pixel = pixel[0]
return pixel
@property
def world_axis_object_components(self):
return [self._wcs.world_axis_object_components[idx] for idx in self._world_keep]
@property
def world_axis_object_classes(self):
keys_keep = [item[0] for item in self.world_axis_object_components]
return dict(
[
item
for item in self._wcs.world_axis_object_classes.items()
if item[0] in keys_keep
]
)
@property
def array_shape(self):
if self._wcs.array_shape:
return np.broadcast_to(0, self._wcs.array_shape)[
tuple(self._slices_array)
].shape
@property
def pixel_shape(self):
if self.array_shape:
return tuple(self.array_shape[::-1])
@property
def pixel_bounds(self):
if self._wcs.pixel_bounds is None:
return
bounds = []
for idx in self._pixel_keep:
if self._slices_pixel[idx].start is None:
bounds.append(self._wcs.pixel_bounds[idx])
else:
imin, imax = self._wcs.pixel_bounds[idx]
start = self._slices_pixel[idx].start
bounds.append((imin - start, imax - start))
return tuple(bounds)
@property
def axis_correlation_matrix(self):
return self._wcs.axis_correlation_matrix[self._world_keep][:, self._pixel_keep]
|
8e10e9fbaccfcab438b05e88dc51c00275e7c6770d8a587366286f17a4ea8266 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""Handles the CDS string format for units."""
import operator
import re
from astropy.units.utils import is_effectively_unity
from astropy.utils import classproperty, parsing
from astropy.utils.misc import did_you_mean
from . import core, utils
from .base import Base
class CDS(Base):
"""
Support the `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical
Catalogues 2.0 <http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_
format, and the `complete set of supported units
<https://vizier.u-strasbg.fr/viz-bin/Unit>`_. This format is used
by VOTable up to version 1.2.
"""
_tokens = (
"PRODUCT",
"DIVISION",
"OPEN_PAREN",
"CLOSE_PAREN",
"OPEN_BRACKET",
"CLOSE_BRACKET",
"X",
"SIGN",
"UINT",
"UFLOAT",
"UNIT",
"DIMENSIONLESS",
)
@classproperty(lazy=True)
def _units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import cds
names = {}
for key, val in cds.__dict__.items():
if isinstance(val, u.UnitBase):
names[key] = val
return names
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_PRODUCT = r"\."
t_DIVISION = r"/"
t_OPEN_PAREN = r"\("
t_CLOSE_PAREN = r"\)"
t_OPEN_BRACKET = r"\["
t_CLOSE_BRACKET = r"\]"
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?"
if not re.search(r"[eE\.]", t.value):
t.type = "UINT"
t.value = int(t.value)
else:
t.value = float(t.value)
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+-](?=\d)"
t.value = float(t.value + "1")
return t
def t_X(t): # multiplication for factor in front of unit
r"[x×]"
return t
def t_UNIT(t):
r"\%|°|\\h|((?!\d)\w)+"
t.value = cls._get_unit(t)
return t
def t_DIMENSIONLESS(t):
r"---|-"
# These are separate from t_UNIT since they cannot have a prefactor.
t.value = cls._get_unit(t)
return t
t_ignore = ""
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
return parsing.lex(
lextab="cds_lextab", package="astropy/units", reflags=int(re.UNICODE)
)
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `Standards
for Astronomical Catalogues 2.0
<http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_, which is not
        terribly precise. The exact grammar here is based on the
YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
"""
main : factor combined_units
| combined_units
| DIMENSIONLESS
| OPEN_BRACKET combined_units CLOSE_BRACKET
| OPEN_BRACKET DIMENSIONLESS CLOSE_BRACKET
| factor
"""
from astropy.units import dex
from astropy.units.core import Unit
if len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = dex(p[2])
else:
p[0] = Unit(p[1])
def p_combined_units(p):
"""
combined_units : product_of_units
| division_of_units
"""
p[0] = p[1]
def p_product_of_units(p):
"""
product_of_units : unit_expression PRODUCT combined_units
| unit_expression
"""
if len(p) == 4:
p[0] = p[1] * p[3]
else:
p[0] = p[1]
def p_division_of_units(p):
"""
division_of_units : DIVISION unit_expression
| unit_expression DIVISION combined_units
"""
if len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1] / p[3]
def p_unit_expression(p):
"""
unit_expression : unit_with_power
| OPEN_PAREN combined_units CLOSE_PAREN
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_factor(p):
"""
factor : signed_float X UINT signed_int
| UINT X UINT signed_int
| UINT signed_int
| UINT
| signed_float
"""
if len(p) == 5:
if p[3] != 10:
raise ValueError("Only base ten exponents are allowed in CDS")
p[0] = p[1] * 10.0 ** p[4]
elif len(p) == 3:
if p[1] != 10:
raise ValueError("Only base ten exponents are allowed in CDS")
p[0] = 10.0 ** p[2]
elif len(p) == 2:
p[0] = p[1]
def p_unit_with_power(p):
"""
unit_with_power : UNIT numeric_power
| UNIT
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] ** p[2]
def p_numeric_power(p):
"""
numeric_power : sign UINT
"""
p[0] = p[1] * p[2]
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
"""
signed_int : SIGN UINT
"""
p[0] = p[1] * p[2]
def p_signed_float(p):
"""
signed_float : sign UINT
| sign UFLOAT
"""
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule="cds_parsetab", package="astropy/units")
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
registry = core.get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(f"At col {t.lexpos}, {str(e)}")
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{}' not supported by the CDS SAC standard. {}".format(
unit, did_you_mean(unit, cls._units)
)
)
else:
raise ValueError()
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
if " " in s:
raise ValueError("CDS unit must not contain whitespace")
if not isinstance(s, str):
s = s.decode("ascii")
# This is a short circuit for the case where the string
# is just a single unit name
try:
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError(str(e))
else:
raise ValueError("Syntax error")
@staticmethod
def _get_unit_name(unit):
return unit.get_format_name("cds")
@classmethod
def _format_unit_list(cls, units):
out = []
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
out.append(f"{cls._get_unit_name(base)}{int(power)}")
return ".".join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit == core.dimensionless_unscaled:
return "---"
elif is_effectively_unity(unit.scale * 100.0):
return "%"
if unit.scale == 1:
s = ""
else:
m, e = utils.split_mantissa_exponent(unit.scale)
parts = []
if m not in ("", "1"):
parts.append(m)
if e:
if not e.startswith("-"):
e = "+" + e
parts.append(f"10{e}")
s = "x".join(parts)
pairs = list(zip(unit.bases, unit.powers))
if len(pairs) > 0:
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
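# A minimal usage sketch (illustrative, not part of the original module):
#
#     from astropy import units as u
#     u.Unit("km/s", format="cds")            # parsed via CDS.parse
#     u.Unit("10+3J/m3", format="cds")        # scale factor: 1000 J / m3
#     (u.km / u.s).to_string(format="cds")    # -> 'km.s-1' via CDS.to_string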
|
1000dc4736a96f20d8fdd02c246dbde24d7311ba0cbce58bcd3ff8e606773546 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
class Base:
"""
The abstract base class of all unit formats.
"""
registry = {}
def __new__(cls, *args, **kwargs):
# This __new__ is to make it clear that there is no reason to
# instantiate a Formatter--if you try to you'll just get back the
# class
return cls
def __init_subclass__(cls, **kwargs):
# Keep a registry of all formats. Key by the class name unless a name
# is explicitly set (i.e., one *not* inherited from a superclass).
if "name" not in cls.__dict__:
cls.name = cls.__name__.lower()
Base.registry[cls.name] = cls
super().__init_subclass__(**kwargs)
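    # For example (illustrative): a subclass ``class CDS(Base)`` that does not
    # set ``name`` explicitly is registered under the key "cds", so
    # ``Base.registry["cds"]`` resolves back to that class.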
@classmethod
def parse(cls, s):
"""
Convert a string to a unit object.
"""
raise NotImplementedError(f"Can not parse with {cls.__name__} format")
@classmethod
def to_string(cls, u):
"""
Convert a unit object to a string.
"""
raise NotImplementedError(f"Can not output in {cls.__name__} format")
|
33427ee69c8ffcee2c007963a48231b11975d38d125c1a922a9f28015aa25700 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
import copy
import keyword
import math
import warnings
from fractions import Fraction
from astropy.utils import parsing
from . import core, generic, utils
class OGIP(generic.Generic):
"""
Support the units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
_tokens = (
"DIVISION",
"OPEN_PAREN",
"CLOSE_PAREN",
"WHITESPACE",
"STARSTAR",
"STAR",
"SIGN",
"UFLOAT",
"LIT10",
"UINT",
"UNKNOWN",
"UNIT",
)
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
bases = [
"A", "C", "cd", "eV", "F", "g", "H", "Hz", "J",
"Jy", "K", "lm", "lx", "m", "mol", "N", "ohm", "Pa",
"pc", "rad", "s", "S", "sr", "T", "V", "W", "Wb",
] # fmt: skip
deprecated_bases = []
prefixes = [
"y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
"", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y",
] # fmt: skip
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
"angstrom", "arcmin", "arcsec", "AU", "barn", "bin",
"byte", "chan", "count", "day", "deg", "erg", "G",
"h", "lyr", "mag", "min", "photon", "pixel",
"voxel", "yr",
] # fmt: skip
for unit in simple_units:
names[unit] = getattr(u, unit)
# Create a separate, disconnected unit for the special case of
# Crab and mCrab, since OGIP doesn't define their quantities.
Crab = u.def_unit(["Crab"], prefixes=False, doc="Crab (X-ray flux)")
mCrab = u.Unit(10**-3 * Crab)
names["Crab"] = Crab
names["mCrab"] = mCrab
deprecated_units = ["Crab", "mCrab"]
for unit in deprecated_units:
deprecated_names.add(unit)
functions = [
"log", "ln", "exp", "sqrt", "sin", "cos", "tan", "asin",
"acos", "atan", "sinh", "cosh", "tanh",
] # fmt: skip
for name in functions:
names[name] = name
return names, deprecated_names, functions
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_DIVISION = r"/"
t_OPEN_PAREN = r"\("
t_CLOSE_PAREN = r"\)"
t_WHITESPACE = "[ \t]+"
t_STARSTAR = r"\*\*"
t_STAR = r"\*"
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"(((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+))|(((\d+\.\d*)|(\.\d+))([eE][+-]?\d+)?)"
t.value = float(t.value)
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+-](?=\d)"
t.value = float(t.value + "1")
return t
def t_X(t): # multiplication for factor in front of unit
r"[x×]"
return t
def t_LIT10(t):
r"10"
return 10
def t_UNKNOWN(t):
r"[Uu][Nn][Kk][Nn][Oo][Ww][Nn]"
return None
def t_UNIT(t):
r"[a-zA-Z][a-zA-Z_]*"
t.value = cls._get_unit(t)
return t
# Don't ignore whitespace
t_ignore = ""
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
return parsing.lex(lextab="ogip_lextab", package="astropy/units")
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the
`Specification of Physical Units within OGIP FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__,
which is not terribly precise. The exact grammar here is
based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
tokens = cls._tokens
def p_main(p):
"""
main : UNKNOWN
| complete_expression
| scale_factor complete_expression
| scale_factor WHITESPACE complete_expression
"""
if len(p) == 4:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_complete_expression(p):
"""
complete_expression : product_of_units
"""
p[0] = p[1]
def p_product_of_units(p):
"""
product_of_units : unit_expression
| division unit_expression
| product_of_units product unit_expression
| product_of_units division unit_expression
"""
if len(p) == 4:
if p[2] == "DIVISION":
p[0] = p[1] / p[3]
else:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1]
def p_unit_expression(p):
"""
unit_expression : unit
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN
| OPEN_PAREN complete_expression CLOSE_PAREN
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
| OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
"""
# If we evaluate ``p[1] in cls._functions`` directly, it will try to
# parse each item in the list into a unit, which is slow. Since we know
# that all the items in the list are strings, we can simply convert
# p[1] to a string first and compare that instead.
p1_str = str(p[1])
if p1_str in cls._functions and p1_str != "sqrt":
raise ValueError(
f"The function '{p[1]}' is valid in OGIP, but not understood "
"by astropy.units."
)
if len(p) == 7:
if p1_str == "sqrt":
p[0] = p[1] * p[3] ** (0.5 * p[6])
else:
p[0] = p[1] * p[3] ** p[6]
elif len(p) == 6:
p[0] = p[2] ** p[5]
elif len(p) == 5:
if p1_str == "sqrt":
p[0] = p[3] ** 0.5
else:
p[0] = p[1] * p[3]
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_scale_factor(p):
"""
scale_factor : LIT10 power numeric_power
| LIT10
| signed_float
| signed_float power numeric_power
| signed_int power numeric_power
"""
if len(p) == 4:
p[0] = 10 ** p[3]
else:
p[0] = p[1]
# Can't use np.log10 here, because p[0] may be a Python long.
if math.log10(p[0]) % 1.0 != 0.0:
from astropy.units.core import UnitsWarning
warnings.warn(
f"'{p[0]}' scale should be a power of 10 in OGIP format",
UnitsWarning,
)
def p_division(p):
"""
division : DIVISION
| WHITESPACE DIVISION
| WHITESPACE DIVISION WHITESPACE
| DIVISION WHITESPACE
"""
p[0] = "DIVISION"
def p_product(p):
"""
product : WHITESPACE
| STAR
| WHITESPACE STAR
| WHITESPACE STAR WHITESPACE
| STAR WHITESPACE
"""
p[0] = "PRODUCT"
def p_power(p):
"""
power : STARSTAR
"""
p[0] = "POWER"
def p_unit(p):
"""
unit : UNIT
| UNIT power numeric_power
"""
if len(p) == 4:
p[0] = p[1] ** p[3]
else:
p[0] = p[1]
def p_numeric_power(p):
"""
numeric_power : UINT
| signed_float
| OPEN_PAREN signed_int CLOSE_PAREN
| OPEN_PAREN signed_float CLOSE_PAREN
| OPEN_PAREN signed_float division UINT CLOSE_PAREN
"""
if len(p) == 6:
p[0] = Fraction(int(p[2]), int(p[4]))
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
"""
signed_int : SIGN UINT
"""
p[0] = p[1] * p[2]
def p_signed_float(p):
"""
signed_float : sign UINT
| sign UFLOAT
"""
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule="ogip_parsetab", package="astropy/units")
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
f"Unit '{unit}' not supported by the OGIP standard. "
+ utils.did_you_mean_units(
unit,
cls._units,
cls._deprecated_units,
cls._to_decomposed_alternative,
),
)
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], "OGIP", cls._to_decomposed_alternative
)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
s = s.strip()
try:
# This is a short circuit for the case where the string is
# just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return core.Unit(cls._parser.parse(s, lexer=cls._lexer, debug=debug))
except ValueError as e:
if str(e):
raise
else:
raise ValueError(f"Syntax error parsing unit '{s}'")
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name("ogip")
cls._validate_unit(name)
return name
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if "/" in power:
out.append(f"{cls._get_unit_name(base)}**({power})")
else:
out.append(f"{cls._get_unit_name(base)}**{power}")
return " ".join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
# Can't use np.log10 here, because the scale may be a Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
warnings.warn(
f"'{unit.scale}' scale should be a power of 10 in OGIP format",
core.UnitsWarning,
)
return generic._to_string(cls, unit)
@classmethod
def _to_decomposed_alternative(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
# Can't use np.log10 here, because the scale may be a Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return (
f"{generic._to_string(cls, unit)} (with data multiplied by {scale})"
)
return generic._to_string(cls, unit)
|
47bf912252cd0092515f3e13e5b80e924bef5f9f9a0f728039ca7b681735ffee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
unit_scale_converter,
)
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity.
"""
converter = from_unit._get_converter(to_unit)
return None if converter is unit_scale_converter else converter
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
# ensure identical units is fast ("==" is slow, so avoid that).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2, dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1 - changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
f"Can only apply '{f.__name__}' function to quantities "
"with compatible dimensions"
)
return converters, unit1
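# Rough sketch of a typical compatible-unit case above (assumes astropy
# and numpy are importable):
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> converters, unit = get_converters_and_unit(np.add, u.km, u.m)
#     >>> unit
#     Unit("km")
#     >>> converters[0] is None, callable(converters[1])
#     (True, True)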
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit**2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit**-1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit**one_half if unit is not None else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit**one_third if unit is not None else dimensionless_unscaled))
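# Quick illustration of the single-argument helper contract above: a list
# with one converter (or None) plus the result unit (assumes astropy is
# importable):
#
#     >>> import astropy.units as u
#     >>> helper_sqrt(np.sqrt, u.m ** 2)[1] == u.m
#     True
#     >>> helper_reciprocal(np.reciprocal, u.s)[1] == u.s ** -1
#     True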
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return (
[get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled),
)
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to dimensionless quantities"
)
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)], dimensionless_unscaled)
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to dimensionless quantities"
)
def helper_dimensionless_to_radian(f, unit):
from astropy.units.si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to dimensionless quantities"
)
def helper_degree_to_radian(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with angle units"
)
def helper_radian_to_degree(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with angle units"
)
def helper_radian_to_dimensionless(f, unit):
from astropy.units.si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with angle units"
)
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to unscaled dimensionless"
" quantities"
)
return [None], (None, None)
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (
get_converter(unit2, dimensionless_unscaled) if unit2 is not None else None
)
except UnitsError:
raise UnitTypeError(
"Can only apply 'heaviside' function with a dimensionless second argument."
)
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (
get_converter(unit1, dimensionless_unscaled) if unit1 is not None else None
)
converter2 = (
get_converter(unit2, dimensionless_unscaled) if unit2 is not None else None
)
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to dimensionless quantities"
)
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from astropy.units.si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
def helper_clip(f, unit1, unit2, unit3):
# Treat the array being clipped as primary.
converters = [None]
if unit1 is None:
result_unit = dimensionless_unscaled
try:
converters += [
(None if unit is None else get_converter(unit, dimensionless_unscaled))
for unit in (unit2, unit3)
]
except UnitsError:
raise UnitConversionError(
f"Can only apply '{f.__name__}' function to quantities with "
"compatible dimensions"
)
else:
result_unit = unit1
for unit in unit2, unit3:
try:
converter = get_converter(_d(unit), result_unit)
except UnitsError:
if unit is None:
# special case: OK if unitless number is zero, inf, nan
converters.append(False)
else:
raise UnitConversionError(
f"Can only apply '{f.__name__}' function to quantities with "
"compatible dimensions"
)
else:
converters.append(converter)
return converters, result_unit
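# Sketch of how the clip helper resolves mixed units: the array being
# clipped is primary and the bounds are converted to its unit (assumes
# astropy and numpy are importable; the values below are illustrative):
#
#     >>> import astropy.units as u
#     >>> q = np.clip([1., 2., 3.] * u.km, 500. * u.m, 2500. * u.m)
#     >>> q.unit, q.value.tolist()
#     (Unit("km"), [1.0, 2.0, 2.5])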
# list of ufuncs:
# https://numpy.org/doc/stable/reference/ufuncs.html#available-ufuncs
UNSUPPORTED_UFUNCS |= {
np.bitwise_and,
np.bitwise_or,
np.bitwise_xor,
np.invert,
np.left_shift,
np.right_shift,
np.logical_and,
np.logical_or,
np.logical_xor,
np.logical_not,
np.isnat,
np.gcd,
np.lcm,
}
# SINGLE ARGUMENT UFUNCS
# ufuncs that do not care about the unit and do not return a Quantity
# (but rather a boolean, or -1, 0, or +1 for np.sign).
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (
np.absolute,
np.fabs,
np.conj,
np.conjugate,
np.negative,
np.spacing,
np.rint,
np.floor,
np.ceil,
np.trunc,
np.positive,
)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (
np.exp,
np.expm1,
np.exp2,
np.log,
np.log10,
np.log2,
np.log1p,
)
# Default numpy does not ship an "erf" ufunc, but some versions hacked by
# intel do. This is bad, since it means code written for that numpy will
# not run on non-hacked numpy. But still, we might as well support it.
if isinstance(getattr(np.core.umath, "erf", None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (
np.arccos,
np.arcsin,
np.arctan,
np.arccosh,
np.arcsinh,
np.arctanh,
)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh, np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.fmin,
np.fmax,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (
np.greater,
np.greater_equal,
np.less,
np.less_equal,
np.not_equal,
np.equal,
)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, "_arg", None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
if isinstance(getattr(np, "matmul", None), np.ufunc):
UFUNC_HELPERS[np.matmul] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
# Check for clip ufunc; note that np.clip is a wrapper function, not the ufunc.
if isinstance(getattr(np.core.umath, "clip", None), np.ufunc):
UFUNC_HELPERS[np.core.umath.clip] = helper_clip
del ufunc
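# After the registrations above, the registry maps each supported ufunc to
# its helper, for example:
#
#     >>> UFUNC_HELPERS[np.add] is helper_twoarg_invariant
#     True
#     >>> np.logical_and in UNSUPPORTED_UFUNCS
#     True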
|
da5178ca0dc8c455a1fdb33c77c70a7f33e0d6d0cfbc9e6b538a58c23e3f444e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Converters for Quantity."""
import threading
import numpy as np
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
__all__ = [
"can_have_arbitrary_unit",
"converters_and_unit",
"check_output",
"UFUNC_HELPERS",
"UNSUPPORTED_UFUNCS",
]
class UfuncHelpers(dict):
"""Registry of unit conversion functions to help ufunc evaluation.
Based on dict for quick access, with a ``__missing__`` method that loads
helpers for additional modules such as scipy.special and erfa.
Such modules should be registered using ``register_module``.
"""
def __init__(self, *args, **kwargs):
self.modules = {}
self.UNSUPPORTED = set() # Upper-case for backwards compatibility
self._lock = threading.RLock()
super().__init__(*args, **kwargs)
def register_module(self, module, names, importer):
"""Register (but do not import) a set of ufunc helpers.
Parameters
----------
module : str
Name of the module with the ufuncs (e.g., 'scipy.special').
names : iterable of str
Names of the module ufuncs for which helpers are available.
importer : callable
Function that imports the ufuncs and returns a dict of helpers
keyed by those ufuncs. If the value is `None`, the ufunc is
explicitly *not* supported.
"""
with self._lock:
self.modules[module] = {"names": names, "importer": importer}
def import_module(self, module):
"""Import the helpers from the given module using its helper function.
Parameters
----------
module : str
Name of the module. Has to have been registered beforehand.
"""
with self._lock:
module_info = self.modules.pop(module)
self.update(module_info["importer"]())
def __missing__(self, ufunc):
"""Called if a ufunc is not found.
Check if the ufunc is in any of the available modules, and, if so,
import the helpers for that module.
"""
with self._lock:
# Check if it was loaded while we waited for the lock
if ufunc in self:
return self[ufunc]
if ufunc in self.UNSUPPORTED:
raise TypeError(f"Cannot use ufunc '{ufunc.__name__}' with quantities")
for module, module_info in list(self.modules.items()):
if ufunc.__name__ in module_info["names"]:
# A ufunc with the same name is supported by this module.
# Of course, this doesn't necessarily mean it is the
# right module. So, we try to let the importer do its work.
# If it fails (e.g., for `scipy.special`), then that's
# fine, just raise the TypeError. If it succeeds, but
# the ufunc is not found, that is also fine: we will
# enter __missing__ again and either find another
# module or get the TypeError there.
try:
self.import_module(module)
except ImportError: # pragma: no cover
pass
else:
return self[ufunc]
raise TypeError(
f"unknown ufunc {ufunc.__name__}. If you believe this ufunc "
"should be supported, please raise an issue on "
"https://github.com/astropy/astropy"
)
def __setitem__(self, key, value):
# Implementation note: in principle, we could just let `None`
# mean that something is not implemented, but this means an
# extra if clause for the output, slowing down the common
# path where a ufunc is supported.
with self._lock:
if value is None:
self.UNSUPPORTED |= {key}
self.pop(key, None)
else:
super().__setitem__(key, value)
self.UNSUPPORTED -= {key}
UFUNC_HELPERS = UfuncHelpers()
UNSUPPORTED_UFUNCS = UFUNC_HELPERS.UNSUPPORTED
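# Hypothetical sketch of deferred registration via ``register_module``
# (the module and ufunc names below are made up for illustration):
#
#     >>> def _import_my_helpers():
#     ...     from my_ufuncs import my_ufunc  # hypothetical module
#     ...     return {my_ufunc: lambda f, unit: ([None], unit)}
#     >>> UFUNC_HELPERS.register_module("my_ufuncs", ["my_ufunc"],
#     ...                               _import_my_helpers)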
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units.
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
bool
`True` if each member is either zero or not finite, `False` otherwise
"""
return np.all(np.logical_or(np.equal(value, 0.0), ~np.isfinite(value)))
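# For example (the exact return type is a numpy bool):
#
#     >>> can_have_arbitrary_unit(0.0), can_have_arbitrary_unit(np.inf)
#     (True, True)
#     >>> can_have_arbitrary_unit([0.0, 1.0])
#     False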
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : `~astropy.units.Quantity` or ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we support this ufunc, by getting the helper function
# (defined in helpers) which returns a list of function(s) that convert the
# input(s) to the unit required for the ufunc, as well as the unit the
# result will have (a tuple of units if there are multiple outputs).
ufunc_helper = UFUNC_HELPERS[function]
if method == "__call__" or (method == "outer" and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, "unit", None) for arg in args]
# Determine possible conversion functions, and the result unit.
converters, result_unit = ufunc_helper(function, *units)
if any(converter is False for converter in converters):
# for multi-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
try:
# Don't fold this loop into the test above: this rare case
# should not make the common case slower.
for i, converter in enumerate(converters):
if converter is not False:
continue
if can_have_arbitrary_unit(args[i]):
converters[i] = None
else:
raise UnitConversionError(
f"Can only apply '{function.__name__}' function to "
"dimensionless quantities when other argument is not "
"a quantity (unless the latter is all zero/infinity/nan)."
)
except TypeError:
# can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError(
"Unsupported operand type(s) for ufunc {}: '{}'".format(
function.__name__,
",".join([arg.__class__.__name__ for arg in args]),
)
)
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
nin = function.nin
unit = getattr(args[0], "unit", None)
if method == "at" and nin <= 2:
if nin == 1:
units = [unit]
else:
units = [unit, getattr(args[2], "unit", None)]
converters, result_unit = ufunc_helper(function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif method in {"reduce", "accumulate", "reduceat"} and nin == 2:
converters, result_unit = ufunc_helper(function, unit, unit)
converters = converters[:1]
if method == "reduceat":
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in {"reduce", "accumulate", "reduceat", "outer"} and nin != 2:
raise ValueError(f"{method} only supported for binary functions")
raise TypeError(
f"Unexpected ufunc method {method}. If this should work, please "
"raise an issue on https://github.com/astropy/astropy"
)
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError(
f"Cannot use '{method}' method on ufunc {function.__name__} with a "
"Quantity instance as the result is not a Quantity."
)
if converters[0] is not None or (
unit is not None
and unit is not result_unit
and (not result_unit.is_equivalent(unit) or result_unit.to(unit) != 1.0)
):
# NOTE: this cannot be the more logical UnitTypeError, since
# then things like np.cumprod will no longer fail (they check
# for TypeError).
raise UnitsError(
f"Cannot use '{method}' method on ufunc {function.__name__} with a "
"Quantity instance as it would change the unit."
)
return converters, result_unit
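# Rough sketch of a typical '__call__' case (assumes astropy and numpy
# are importable):
#
#     >>> import astropy.units as u
#     >>> converters, unit = converters_and_unit(
#     ...     np.multiply, "__call__", 3.0 * u.m, 2.0 * u.s)
#     >>> converters
#     [None, None]
#     >>> unit == u.m * u.s
#     True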
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : ndarray view or tuple thereof
The view(s) is of ``output``.
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(
check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit)
)
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, "__quantity_subclass__"):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError(
"Cannot store non-quantity output{} in {} instance".format(
(
f" from {function.__name__} function"
if function is not None
else ""
),
type(output),
)
)
q_cls, subok = output.__quantity_subclass__(unit)
if not (subok or q_cls is type(output)):
raise UnitTypeError(
"Cannot store output with unit '{}'{} "
"in {} instance. Use {} instance instead.".format(
unit,
(
f" from {function.__name__} function"
if function is not None
else ""
),
type(output),
q_cls,
)
)
# check we can handle the dtype (e.g., that we are not int
# when float is required). Note that we only do this for Quantity
# output; for array output, we defer to numpy's default handling.
# Also, any structured dtype are ignored (likely erfa ufuncs).
# TODO: make more logical; is this necessary at all?
if inputs and not output.dtype.names:
result_type = np.result_type(*inputs)
if not (
result_type.names
or np.can_cast(result_type, output.dtype, casting="same_kind")
):
raise TypeError(
"Arguments cannot be cast safely to inplace "
f"output with dtype={output.dtype}"
)
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
return output.view(np.ndarray)
else:
# output is not a Quantity, so cannot obtain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError(
"Cannot store quantity with dimension "
"{}in a non-Quantity instance.".format(
f"resulting from {function.__name__} function "
if function is not None
else ""
)
)
return output
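# Minimal sketch of the common case above: a Quantity output with a
# consistent unit is returned as a plain ndarray view (assumes astropy
# and numpy are importable):
#
#     >>> import astropy.units as u
#     >>> out = np.empty(3) * u.m
#     >>> view = check_output(out, u.m, (np.arange(3.0),), function=np.add)
#     >>> type(view) is np.ndarray
#     True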
|
992e1058e1062093a4510ea7bd32f3184fb73795e932d7c463bb795b589f6f47 | # Licensed under a 3-clause BSD style license. See LICENSE.rst except
# for parts explicitly labelled as being (largely) copies of numpy
# implementations; for those, see licenses/NUMPY_LICENSE.rst.
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
"""
import functools
import operator
import numpy as np
from numpy.lib import recfunctions as rfn
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_23
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides, "ENABLE_ARRAY_FUNCTION", True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diagonal, np.diagflat, np.empty_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.median, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis, np.take_along_axis, np.put_along_axis,
np.linalg.cond, np.linalg.multi_dot,
} # fmt: skip
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday, np.all, np.any, np.sometrue, np.alltrue,
} # fmt: skip
# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}
TBD_FUNCTIONS = {
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.find_duplicates, rfn.recursive_fill_fields, rfn.require_fields,
rfn.repack_fields, rfn.stack_arrays,
} # fmt: skip
UNSUPPORTED_FUNCTIONS |= TBD_FUNCTIONS
IGNORED_FUNCTIONS = {
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
# functions taking record arrays (which are deprecated)
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
} # fmt: skip
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None, module=np):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
"""
if f is not None:
if helps is None:
helps = getattr(module, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None or module is not np:
return functools.partial(self.__call__, helps=helps, module=module)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
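# Once this module has been imported, the assigners above will have filled
# the registries via the decorators defined below, for example:
#
#     >>> FUNCTION_HELPERS[np.fft.fft] is invariant_a_helper
#     True
#     >>> DISPATCHED_FUNCTIONS[np.unwrap] is unwrap
#     True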
# fmt: off
@function_helper(
helps={
np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
np.fft.hfft, np.fft.ihfft,
np.linalg.eigvals, np.linalg.eigvalsh,
}
)
# fmt: on
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default since if one creates an empty
# array with a unit, one cannot just fill it with unity. Indeed, in this
# respect, it is a bit of an odd function for Quantity. On the other hand, it
# matches the idea that a unit is the same as the quantity with that unit and
# value of 1. Also, it used to work without __array_function__.
# zeros_like does work by default for regular quantities, because numpy first
# creates an empty array with the unit and then fills it with 0 (which can have
# any unit), but for structured dtype this fails (0 cannot have an arbitrary
# structured unit), so we include it here too.
@function_helper(helps={np.ones_like, np.zeros_like})
def like_helper(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop("subok", True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError(
"Can only apply 'sinc' function to quantities with angle units"
)
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(
p.to_value(radian), discont.to_value(radian), axis=axis
)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get("subok", True) else None
return (a.view(np.ndarray), a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask, a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask, values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask, arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask, vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return (dst.view(np.ndarray), dst._to_own_unit(src)) + args, kwargs, None, None
elif isinstance(src, Quantity):
return (dst, src.to_value(dimensionless_unscaled)) + args, kwargs, None, None
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return (
(x.view(np.ndarray),),
dict(copy=True, nan=nan, posinf=posinf, neginf=neginf),
x.unit,
None,
)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
# Note: this should keep the dtype the same
return tuple(Quantity(a, copy=False, subok=True, dtype=None) for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
"""Convert to arrays in units of the first argument that has a unit.
If unit_from_first, take the unit of the first argument regardless
whether it actually defined a unit (e.g., dimensionless for arrays).
"""
# Turn first argument into a quantity.
q = _as_quantity(args[0])
if len(args) == 1:
return (q.value,), q.unit
# If we care about the unit being explicit, then check whether this
# argument actually had a unit, or was likely inferred.
if not unit_from_first and (
q.unit is q._default_unit and not hasattr(args[0], "unit")
):
# Here, the argument could still be things like [10*u.one, 11.*u.one]),
# i.e., properly dimensionless. So, we only override with anything
# that has a unit not equivalent to dimensionless (fine to ignore other
# dimensionless units pass, even if explicitly given).
for arg in args[1:]:
trial = _as_quantity(arg)
if not trial.unit.is_equivalent(q.unit):
# Use any explicit unit not equivalent to dimensionless.
q = trial
break
# We use the private _to_own_unit method here instead of just
# converting everything to quantity and then do .to_value(qs0.unit)
# as we want to allow arbitrary unit for 0, inf, and nan.
try:
arrays = tuple((q._to_own_unit(arg)) for arg in args)
except TypeError:
raise NotImplementedError
return arrays, q.unit
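# Quick sketch of the conversion above: all arguments come back as plain
# values in the unit of the first argument that defines one (assumes
# astropy is importable; scalars may come back as numpy scalars):
#
#     >>> import astropy.units as u
#     >>> (a1, a2), unit = _quantities2arrays(1.0 * u.km, 500.0 * u.m)
#     >>> float(a1), float(a2), unit
#     (1.0, 0.5, Unit("km"))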
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs["out"] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None, **kwargs):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis, **kwargs)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim, final_size) = np.core.shape_base._block_setup(
arrays
)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values, unit_from_first=True)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode="constant", **kwargs):
# pad dispatches only on array, so that must be a Quantity.
for key in "constant_values", "end_values":
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple)
else array._to_own_unit(v)
)
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
@function_helper(helps=({np.quantile, np.nanquantile}))
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop("out", None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
from astropy.units import Quantity
(a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
# Allow number without a unit as having the unit.
atol = Quantity(atol, unit).value
return (a, b, rtol, atol) + args, kwargs, None, None
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
try:
args, unit = _quantities2arrays(a1, a2)
except UnitConversionError:
return False, None, None
return np.array_equal(*args, equal_nan=equal_nan), None, None
@dispatched_function
def array_equiv(a1, a2):
try:
args, unit = _quantities2arrays(a1, a2)
except UnitConversionError:
return False, None, None
return np.array_equiv(*args), None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(
helps={
np.cross,
np.inner,
np.vdot,
np.tensordot,
np.kron,
np.correlate,
np.convolve,
}
)
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs["out"] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs), dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
def _check_range(range, unit):
range = _as_quantity(range)
range = range.to_value(unit)
return range
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if range is not None:
range = _check_range(range, a.unit)
if density:
unit = (unit or 1) / a.unit
return (
(a.value, bins, range),
{"weights": weights, "density": density},
(unit, a.unit),
None,
)
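# Rough sketch of the resulting behaviour (assumes astropy and numpy are
# importable): the counts come back as a plain array and the bin edges
# carry the unit of the input.
#
#     >>> import astropy.units as u
#     >>> counts, edges = np.histogram([1., 2., 3.] * u.m, bins=3)
#     >>> counts
#     array([1, 1, 1])
#     >>> edges.unit
#     Unit("m")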
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if range is not None:
range = _check_range(range, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if range is not None:
range = tuple(
_check_range(r, unit) for (r, unit) in zip(range, (x.unit, y.unit))
)
if density:
unit = (unit or 1) / x.unit / y.unit
return (
(x.value, y.value, bins, range),
{"weights": weights, "density": density},
(unit, x.unit, y.unit),
None,
)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
"The dimension of bins must be equal to the dimension of the sample x."
)
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, sample_units)]
if range is not None:
range = tuple(_check_range(r, unit) for (r, unit) in zip(range, sample_units))
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return (
(sample, bins, range),
{"weights": weights, "density": density},
(unit, sample_units),
None,
)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get("axis", None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
    return (f.value,) + varargs, kwargs, units, None
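# Sketch of the expected behaviour (assuming standard dispatch): each returned
# gradient picks up ``f.unit / vararg.unit``, e.g.
#
#     >>> import numpy as np; import astropy.units as u
#     >>> np.gradient([1., 2., 4.] * u.m, [1., 2., 3.] * u.s)
#     <Quantity [1. , 1.5, 2. ] m / s>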
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
if not isinstance(start, LogQuantity) or not isinstance(stop, LogQuantity):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
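# Rough illustration (assuming standard dispatch): ``x`` and ``xp`` are brought
# to a common unit and the result carries the unit of ``fp``, e.g.
#
#     >>> import numpy as np; import astropy.units as u
#     >>> np.interp(150. * u.cm, [1., 2.] * u.m, [10., 20.] * u.mV)
#     <Quantity 15. mV>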
@function_helper
def unique(
ar, return_index=False, return_inverse=False, return_counts=False, axis=None
):
unit = ar.unit
n_index = sum(bool(i) for i in (return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts, axis), {}, unit, None
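# Sketch of the expected behaviour (assuming standard dispatch): the unique
# values keep the input unit, while index/count outputs stay plain arrays, e.g.
#
#     >>> import numpy as np; import astropy.units as u
#     >>> np.unique([1., 1., 2.] * u.m, return_counts=True)
#     (<Quantity [1., 2.] m>, array([2, 1]))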
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
    # This tests whether ar1 is in ar2, so we should change the unit of
    # ar1 to that of ar2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
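# Rough illustration (assuming standard dispatch): the membership check is done
# after converting ``ar1`` to the unit of ``ar2``, and a plain boolean array is
# returned, e.g.
#
#     >>> import numpy as np; import astropy.units as u
#     >>> np.isin([100.] * u.cm, [1., 2.] * u.m)
#     array([ True])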
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
# since a is assumed to an an array in the next line...
# Which is what we do here - we can only get here if it is a Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
# Returning unit is None to signal nothing should happen to
# the output.
return val, None, None
@dispatched_function
def array_repr(arr, *args, **kwargs):
# TODO: The addition of "unit='...'" doesn't worry about line
# length. Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
cls_name = arr.__class__.__name__
fake_name = "_" * len(cls_name)
fake_cls = type(fake_name, (np.ndarray,), {})
no_unit = np.array_repr(arr.view(fake_cls), *args, **kwargs).replace(
fake_name, cls_name
)
unit_part = f"unit='{arr.unit}'"
pre, dtype, post = no_unit.rpartition("dtype")
if dtype:
return f"{pre}{unit_part}, {dtype}{post}", None, None
else:
return f"{no_unit[:-1]}, {unit_part})", None, None
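# Sketch of the expected output (assuming standard dispatch): the unit is spliced
# into the usual ndarray repr, e.g.
#
#     >>> import numpy as np; import astropy.units as u
#     >>> np.array_repr(np.arange(3.) * u.m)
#     "Quantity([0., 1., 2.], unit='m')"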
@dispatched_function
def array_str(arr, *args, **kwargs):
# TODO: The addition of the unit doesn't worry about line length.
# Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
no_unit = np.array_str(arr.value, *args, **kwargs)
return no_unit + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
# array2string breaks on quantities as it tries to turn individual
# items into float, which works only for dimensionless. Since the
# defaults would not keep any unit anyway, this is rather pointless -
# we're better off just passing on the array view. However, one can
# also work around this by passing on a formatter (as is done in Angle).
# So, we do nothing if the formatter argument is present and has the
# relevant formatter for our dtype.
formatter = args[6] if len(args) >= 7 else kwargs.get("formatter", None)
if formatter is None:
a = a.value
else:
# See whether it covers our dtype.
from numpy.core.arrayprint import _get_format_function
with np.printoptions(formatter=formatter) as options:
try:
ff = _get_format_function(a.value, **options)
except Exception:
# Shouldn't happen, but possibly we're just not being smart
# enough, so let's pass things on as is.
pass
else:
# If the selected format function is that of numpy, we know
# things will fail
if "numpy" in ff.__module__:
a = a.value
return (a,) + args, kwargs, None, None
@function_helper
def diag(v, *args, **kwargs):
# Function works for *getting* the diagonal, but not *setting*.
# So, override always.
return (v.value,) + args, kwargs, v.unit, None
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
unit = a.unit
if compute_uv:
unit = (None, unit, None)
return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian), {}, unit, None)
def _interpret_tol(tol, unit):
from astropy.units import Quantity
return Quantity(tol, unit).value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
if tol is not None:
tol = _interpret_tol(tol, M.unit)
return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
rcond = _interpret_tol(rcond, a.unit)
return (a.view(np.ndarray), rcond) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def det(a):
return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
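# Rough illustration (assuming standard dispatch): the determinant of an
# (..., n, n) array picks up ``a.unit ** n`` (and, analogously, ``inv`` above
# gives ``1 / a.unit``), e.g.
#
#     >>> import numpy as np; import astropy.units as u
#     >>> np.linalg.det(np.eye(2) * u.m)
#     <Quantity 1. m2>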
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
return (
(a.view(np.ndarray), b.view(np.ndarray)) + args,
kwargs,
b.unit / a.unit,
None,
)
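# Sketch of the expected behaviour (assuming standard dispatch): the solution of
# ``a @ x = b`` comes back in ``b.unit / a.unit``, e.g.
#
#     >>> import numpy as np; import astropy.units as u
#     >>> np.linalg.solve(np.eye(2) * u.s, [2., 4.] * u.m)
#     <Quantity [2., 4.] m / s>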
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
a, b = _as_quantities(a, b)
if rcond not in (None, "warn", -1):
rcond = _interpret_tol(rcond, a.unit)
return (
(a.view(np.ndarray), b.view(np.ndarray), rcond),
{},
(b.unit / a.unit, b.unit**2, None, a.unit),
None,
)
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
if ord == 0:
from astropy.units import dimensionless_unscaled
unit = dimensionless_unscaled
else:
unit = x.unit
return (x.view(np.ndarray), ord) + args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
return (a.value, n), {}, a.unit**n, None
@function_helper(module=np.linalg)
def cholesky(a):
return (a.value,), {}, a.unit**0.5, None
@function_helper(module=np.linalg)
def qr(a, mode="reduced"):
if mode.startswith("e"):
units = None
elif mode == "r":
units = a.unit
else:
from astropy.units import dimensionless_unscaled
units = (dimensionless_unscaled, a.unit)
return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
from astropy.units import dimensionless_unscaled
return (a.value,) + args, kwargs, (a.unit, dimensionless_unscaled), None
# ======================= np.lib.recfunctions =======================
@function_helper(module=np.lib.recfunctions)
def structured_to_unstructured(arr, *args, **kwargs):
"""
Convert a structured quantity to an unstructured one.
This only works if all the units are compatible.
"""
from astropy.units import StructuredUnit
target_unit = arr.unit.values()[0]
def replace_unit(x):
if isinstance(x, StructuredUnit):
return x._recursively_apply(replace_unit)
else:
return target_unit
to_unit = arr.unit._recursively_apply(replace_unit)
return (arr.to_value(to_unit),) + args, kwargs, target_unit, None
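# Rough illustration (assuming standard dispatch and mutually convertible field
# units): all fields are converted to the unit of the first field before being
# unpacked, e.g.
#
#     >>> import numpy as np; import astropy.units as u
#     >>> from numpy.lib import recfunctions as rfn
#     >>> q = u.Quantity(np.array([(1., 2.)], dtype=[("a", "f8"), ("b", "f8")]), "m, km")
#     >>> rfn.structured_to_unstructured(q)
#     <Quantity [[1., 2000.]] m>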
def _build_structured_unit(dtype, unit):
"""Build structured unit from dtype.
Parameters
----------
dtype : `numpy.dtype`
unit : `astropy.units.Unit`
Returns
-------
`astropy.units.Unit` or tuple
"""
if dtype.fields is None:
return unit
return tuple(_build_structured_unit(v[0], unit) for v in dtype.fields.values())
@function_helper(module=np.lib.recfunctions)
def unstructured_to_structured(arr, dtype, *args, **kwargs):
from astropy.units import StructuredUnit
target_unit = StructuredUnit(_build_structured_unit(dtype, arr.unit))
return (arr.to_value(arr.unit), dtype) + args, kwargs, target_unit, None
def _izip_units_flat(iterable):
"""Returns an iterator of collapsing any nested unit structure.
Parameters
----------
iterable : Iterable[StructuredUnit | Unit] or StructuredUnit
A structured unit or iterable thereof.
Yields
------
unit
"""
from astropy.units import StructuredUnit
# Make Structured unit (pass-through if it is already).
units = StructuredUnit(iterable)
# Yield from structured unit.
for v in units.values():
if isinstance(v, StructuredUnit):
yield from _izip_units_flat(v)
else:
yield v
@function_helper(helps=rfn.merge_arrays)
def merge_arrays(
seqarrays,
fill_value=-1,
flatten=False,
usemask=False,
asrecarray=False,
):
"""Merge structured Quantities field by field.
Like :func:`numpy.lib.recfunctions.merge_arrays`. Note that ``usemask`` and
``asrecarray`` are not supported at this time and will raise a ValueError if
not `False`.
"""
from astropy.units import Quantity, StructuredUnit
if asrecarray:
# TODO? implement if Quantity ever supports rec.array
raise ValueError("asrecarray=True is not supported.")
if usemask:
# TODO: use MaskedQuantity for this case
raise ValueError("usemask=True is not supported.")
# Do we have a single Quantity as input?
if isinstance(seqarrays, Quantity):
seqarrays = (seqarrays,)
# Note: this also converts ndarray -> Quantity[dimensionless]
seqarrays = _as_quantities(*seqarrays)
arrays = tuple(q.value for q in seqarrays)
units = tuple(q.unit for q in seqarrays)
if flatten:
unit = StructuredUnit(tuple(_izip_units_flat(units)))
elif len(arrays) == 1:
unit = StructuredUnit(units[0])
else:
unit = StructuredUnit(units)
return (
(arrays,),
dict(
fill_value=fill_value,
flatten=flatten,
usemask=usemask,
asrecarray=asrecarray,
),
unit,
None,
)
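# Sketch of the expected behaviour (assuming standard dispatch): merging plain
# Quantities yields a structured Quantity whose fields keep their own units, e.g.
#
#     >>> import astropy.units as u
#     >>> from numpy.lib import recfunctions as rfn
#     >>> merged = rfn.merge_arrays(([1., 2.] * u.m, [3., 4.] * u.s))
#     >>> merged["f0"], merged["f1"]
#     (<Quantity [1., 2.] m>, <Quantity [3., 4.] s>)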
|
992f665c803a340b84245f400243d48312ffcbdbb3298498e3dff7959a3938b9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the propagation of info on Quantity during operations."""
import copy
import numpy as np
from astropy import units as u
def assert_info_equal(a, b, ignore=set()):
a_info = a.info
b_info = b.info
for attr in (a_info.attr_names | b_info.attr_names) - ignore:
if attr == "unit":
assert a_info.unit.is_equivalent(b_info.unit)
else:
assert getattr(a_info, attr, None) == getattr(b_info, attr, None)
def assert_no_info(a):
assert "info" not in a.__dict__
class TestQuantityInfo:
@classmethod
def setup_class(self):
self.q = u.Quantity(np.arange(1.0, 5.0), "m/s")
self.q.info.name = "v"
        self.q.info.description = "air speed of an African swallow"
def test_copy(self):
q_copy1 = self.q.copy()
assert_info_equal(q_copy1, self.q)
q_copy2 = copy.copy(self.q)
assert_info_equal(q_copy2, self.q)
q_copy3 = copy.deepcopy(self.q)
assert_info_equal(q_copy3, self.q)
def test_slice(self):
q_slice = self.q[1:3]
assert_info_equal(q_slice, self.q)
q_take = self.q.take([0, 1])
assert_info_equal(q_take, self.q)
def test_item(self):
# Scalars do not get info set (like for Column); TODO: is this OK?
q1 = self.q[1]
assert_no_info(q1)
q_item = self.q.item(1)
assert_no_info(q_item)
def test_iter(self):
# Scalars do not get info set.
for q in self.q:
assert_no_info(q)
for q in iter(self.q):
assert_no_info(q)
def test_change_to_equivalent_unit(self):
q1 = self.q.to(u.km / u.hr)
assert_info_equal(q1, self.q)
q2 = self.q.si
assert_info_equal(q2, self.q)
q3 = self.q.cgs
assert_info_equal(q3, self.q)
q4 = self.q.decompose()
assert_info_equal(q4, self.q)
def test_reshape(self):
q = self.q.reshape(-1, 1, 2)
assert_info_equal(q, self.q)
q2 = q.squeeze()
assert_info_equal(q2, self.q)
def test_insert(self):
q = self.q.copy()
q.insert(1, 1 * u.cm / u.hr)
assert_info_equal(q, self.q)
def test_unary_op(self):
q = -self.q
assert_no_info(q)
def test_binary_op(self):
q = self.q + self.q
assert_no_info(q)
def test_unit_change(self):
q = self.q * u.s
assert_no_info(q)
q2 = u.s / self.q
        assert_no_info(q2)
def test_inplace_unit_change(self):
# Not sure if it is logical to keep info here!
q = self.q.copy()
q *= u.s
assert_info_equal(q, self.q, ignore={"unit"})
class TestStructuredQuantity:
@classmethod
def setup_class(self):
value = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=[("p", "f8"), ("v", "f8")])
self.q = u.Quantity(value, "m, m/s")
self.q.info.name = "pv"
self.q.info.description = "Location and speed"
def test_keying(self):
q_p = self.q["p"]
assert_no_info(q_p)
def test_slicing(self):
q = self.q[:1]
assert_info_equal(q, self.q)
def test_item(self):
# Scalars do not get info set.
q = self.q[1]
assert_no_info(q)
|
9bfcd60aac55cd373f286d7962090d5a1834e4282354c4c98ba226e3fba26d79 | # The purpose of these tests is to ensure that calling quantities using
# array methods returns quantities with the right units, or raises exceptions.
import sys
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.utils.compat import NUMPY_LT_1_21_1, NUMPY_LT_1_22
class TestQuantityArrayCopy:
"""
Test whether arrays are properly copied/used in place
"""
def test_copy_on_creation(self):
v = np.arange(1000.0)
q_nocopy = u.Quantity(v, "km/s", copy=False)
q_copy = u.Quantity(v, "km/s", copy=True)
v[0] = -1.0
assert q_nocopy[0].value == v[0]
assert q_copy[0].value != v[0]
def test_to_copies(self):
q = u.Quantity(np.arange(1.0, 100.0), "km/s")
q2 = q.to(u.m / u.s)
assert np.all(q.value != q2.value)
q3 = q.to(u.km / u.s)
assert np.all(q.value == q3.value)
q[0] = -1.0 * u.km / u.s
assert q[0].value != q3[0].value
def test_si_copies(self):
q = u.Quantity(np.arange(100.0), "m/s")
q2 = q.si
assert np.all(q.value == q2.value)
q[0] = -1.0 * u.m / u.s
assert q[0].value != q2[0].value
def test_getitem_is_view(self):
"""Check that [keys] work, and that, like ndarray, it returns
a view, so that changing one changes the other.
Also test that one can add axes (closes #1422)
"""
q = u.Quantity(np.arange(100.0), "m/s")
q_sel = q[10:20]
q_sel[0] = -1.0 * u.m / u.s
assert q_sel[0] == q[10]
# also check that getitem can do new axes
q2 = q[:, np.newaxis]
q2[10, 0] = -9 * u.m / u.s
assert np.all(q2.flatten() == q)
def test_flat(self):
q = u.Quantity(np.arange(9.0).reshape(3, 3), "m/s")
q_flat = q.flat
# check that a single item is a quantity (with the right value)
assert q_flat[8] == 8.0 * u.m / u.s
# and that getting a range works as well
assert np.all(q_flat[0:2] == np.arange(2.0) * u.m / u.s)
# as well as getting items via iteration
q_flat_list = [_q for _q in q.flat]
assert np.all(
u.Quantity(q_flat_list) == u.Quantity([_a for _a in q.value.flat], q.unit)
)
# check that flat works like a view of the real array
q_flat[8] = -1.0 * u.km / u.s
assert q_flat[8] == -1.0 * u.km / u.s
assert q[2, 2] == -1.0 * u.km / u.s
# while if one goes by an iterated item, a copy is made
q_flat_list[8] = -2 * u.km / u.s
assert q_flat_list[8] == -2.0 * u.km / u.s
assert q_flat[8] == -1.0 * u.km / u.s
assert q[2, 2] == -1.0 * u.km / u.s
class TestQuantityReshapeFuncs:
"""Test different ndarray methods that alter the array shape
tests: reshape, squeeze, ravel, flatten, transpose, swapaxes
"""
def test_reshape(self):
q = np.arange(6.0) * u.m
q_reshape = q.reshape(3, 2)
assert isinstance(q_reshape, u.Quantity)
assert q_reshape.unit == q.unit
assert np.all(q_reshape.value == q.value.reshape(3, 2))
def test_squeeze(self):
q = np.arange(6.0).reshape(6, 1) * u.m
q_squeeze = q.squeeze()
assert isinstance(q_squeeze, u.Quantity)
assert q_squeeze.unit == q.unit
assert np.all(q_squeeze.value == q.value.squeeze())
def test_ravel(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_ravel = q.ravel()
assert isinstance(q_ravel, u.Quantity)
assert q_ravel.unit == q.unit
assert np.all(q_ravel.value == q.value.ravel())
def test_flatten(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_flatten = q.flatten()
assert isinstance(q_flatten, u.Quantity)
assert q_flatten.unit == q.unit
assert np.all(q_flatten.value == q.value.flatten())
def test_transpose(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_transpose = q.transpose()
assert isinstance(q_transpose, u.Quantity)
assert q_transpose.unit == q.unit
assert np.all(q_transpose.value == q.value.transpose())
def test_swapaxes(self):
q = np.arange(6.0).reshape(3, 1, 2) * u.m
q_swapaxes = q.swapaxes(0, 2)
assert isinstance(q_swapaxes, u.Quantity)
assert q_swapaxes.unit == q.unit
assert np.all(q_swapaxes.value == q.value.swapaxes(0, 2))
@pytest.mark.xfail(
sys.byteorder == "big" and NUMPY_LT_1_21_1, reason="Numpy GitHub Issue 19153"
)
def test_flat_attributes(self):
"""While ``flat`` doesn't make a copy, it changes the shape."""
q = np.arange(6.0).reshape(3, 1, 2) * u.m
qf = q.flat
# flat shape is same as before reshaping
assert len(qf) == 6
# see TestQuantityArrayCopy.test_flat for tests of iteration
# and slicing and setting. Here we test the properties and methods to
# match `numpy.ndarray.flatiter`
assert qf.base is q
# testing the indices -- flat and full -- into the array
assert qf.coords == (0, 0, 0) # to start
assert qf.index == 0
# now consume the iterator
endindices = [(qf.index, qf.coords) for x in qf][-2] # next() oversteps
assert endindices[0] == 5
assert endindices[1] == (2, 0, 1) # shape of q - 1
# also check q_flat copies properly
q_flat_copy = qf.copy()
assert all(q_flat_copy == q.flatten())
assert isinstance(q_flat_copy, u.Quantity)
assert not np.may_share_memory(q_flat_copy, q)
class TestQuantityStatsFuncs:
"""
Test statistical functions
"""
def test_mean(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert_array_equal(np.mean(q1), 3.6 * u.m)
assert_array_equal(np.mean(q1, keepdims=True), [3.6] * u.m)
def test_mean_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
qi2 = np.mean(q1, out=qi)
assert qi2 is qi
assert qi == 3.6 * u.m
def test_mean_where(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0, 7.0]) * u.m
assert_array_equal(np.mean(q1, where=q1 < 7 * u.m), 3.6 * u.m)
def test_std(self):
q1 = np.array([1.0, 2.0]) * u.m
assert_array_equal(np.std(q1), 0.5 * u.m)
assert_array_equal(q1.std(axis=-1, keepdims=True), [0.5] * u.m)
def test_std_inplace(self):
q1 = np.array([1.0, 2.0]) * u.m
qi = 1.5 * u.s
np.std(q1, out=qi)
assert qi == 0.5 * u.m
def test_std_where(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
assert_array_equal(np.std(q1, where=q1 < 3 * u.m), 0.5 * u.m)
def test_var(self):
q1 = np.array([1.0, 2.0]) * u.m
assert_array_equal(np.var(q1), 0.25 * u.m**2)
assert_array_equal(q1.var(axis=0, keepdims=True), [0.25] * u.m**2)
def test_var_inplace(self):
q1 = np.array([1.0, 2.0]) * u.m
qi = 1.5 * u.s
np.var(q1, out=qi)
assert qi == 0.25 * u.m**2
def test_var_where(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
assert_array_equal(np.var(q1, where=q1 < 3 * u.m), 0.25 * u.m**2)
def test_median(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.median(q1) == 4.0 * u.m
def test_median_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
np.median(q1, out=qi)
assert qi == 4 * u.m
def test_min(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.min(q1) == 1.0 * u.m
def test_min_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
np.min(q1, out=qi)
assert qi == 1.0 * u.m
def test_min_where(self):
q1 = np.array([0.0, 1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.min(q1, initial=10 * u.m, where=q1 > 0 * u.m) == 1.0 * u.m
def test_argmin(self):
q1 = np.array([6.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.argmin(q1) == 1
@pytest.mark.skipif(NUMPY_LT_1_22, reason="keepdims only introduced in numpy 1.22")
def test_argmin_keepdims(self):
q1 = np.array([[6.0, 2.0], [4.0, 5.0]]) * u.m
assert_array_equal(q1.argmin(axis=0, keepdims=True), np.array([[1, 0]]))
def test_max(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.max(q1) == 6.0 * u.m
def test_max_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
np.max(q1, out=qi)
assert qi == 6.0 * u.m
def test_max_where(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0, 7.0]) * u.m
assert np.max(q1, initial=0 * u.m, where=q1 < 7 * u.m) == 6.0 * u.m
def test_argmax(self):
q1 = np.array([5.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.argmax(q1) == 4
@pytest.mark.skipif(NUMPY_LT_1_22, reason="keepdims only introduced in numpy 1.22")
def test_argmax_keepdims(self):
q1 = np.array([[6.0, 2.0], [4.0, 5.0]]) * u.m
assert_array_equal(q1.argmax(axis=0, keepdims=True), np.array([[0, 1]]))
def test_clip(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
c1 = q1.clip(1500, 5.5 * u.Mm / u.km)
assert np.all(c1 == np.array([1.5, 2.0, 4.0, 5.0, 5.5]) * u.km / u.m)
def test_clip_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
c1 = q1.clip(1500, 5.5 * u.Mm / u.km, out=q1)
assert np.all(q1 == np.array([1.5, 2.0, 4.0, 5.0, 5.5]) * u.km / u.m)
c1[0] = 10 * u.Mm / u.mm
assert np.all(c1.value == q1.value)
def test_conj(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
assert np.all(q1.conj() == q1)
def test_ptp(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.ptp(q1) == 5.0 * u.m
def test_ptp_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
np.ptp(q1, out=qi)
assert qi == 5.0 * u.m
def test_round(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
assert np.all(np.round(q1) == np.array([1, 2, 3]) * u.kg)
assert np.all(np.round(q1, decimals=2) == np.round(q1.value, decimals=2) * u.kg)
assert np.all(q1.round(decimals=2) == q1.value.round(decimals=2) * u.kg)
def test_round_inplace(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
qi = np.zeros(3) * u.s
a = q1.round(decimals=2, out=qi)
assert a is qi
assert np.all(q1.round(decimals=2) == qi)
def test_sum(self):
q1 = np.array([1.0, 2.0, 6.0]) * u.m
assert np.all(q1.sum() == 9.0 * u.m)
assert np.all(np.sum(q1) == 9.0 * u.m)
q2 = np.array([[4.0, 5.0, 9.0], [1.0, 1.0, 1.0]]) * u.s
assert np.all(q2.sum(0) == np.array([5.0, 6.0, 10.0]) * u.s)
assert np.all(np.sum(q2, 0) == np.array([5.0, 6.0, 10.0]) * u.s)
def test_sum_inplace(self):
q1 = np.array([1.0, 2.0, 6.0]) * u.m
qi = 1.5 * u.s
np.sum(q1, out=qi)
assert qi == 9.0 * u.m
def test_sum_where(self):
q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
where = q1 < 7 * u.m
assert np.all(q1.sum(where=where) == 9.0 * u.m)
assert np.all(np.sum(q1, where=where) == 9.0 * u.m)
@pytest.mark.parametrize("initial", [0, 0 * u.m, 1 * u.km])
def test_sum_initial(self, initial):
q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
expected = 16 * u.m + initial
assert q1.sum(initial=initial) == expected
assert np.sum(q1, initial=initial) == expected
def test_sum_dimensionless_initial(self):
q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.one
assert q1.sum(initial=1000) == 1016 * u.one
@pytest.mark.parametrize("initial", [10, 1 * u.s])
def test_sum_initial_exception(self, initial):
q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
with pytest.raises(u.UnitsError):
q1.sum(initial=initial)
def test_cumsum(self):
q1 = np.array([1, 2, 6]) * u.m
assert np.all(q1.cumsum() == np.array([1, 3, 9]) * u.m)
assert np.all(np.cumsum(q1) == np.array([1, 3, 9]) * u.m)
q2 = np.array([4, 5, 9]) * u.s
assert np.all(q2.cumsum() == np.array([4, 9, 18]) * u.s)
assert np.all(np.cumsum(q2) == np.array([4, 9, 18]) * u.s)
def test_cumsum_inplace(self):
q1 = np.array([1, 2, 6]) * u.m
qi = np.ones(3) * u.s
np.cumsum(q1, out=qi)
assert np.all(qi == np.array([1, 3, 9]) * u.m)
q2 = q1
q1.cumsum(out=q1)
assert np.all(q2 == qi)
@pytest.mark.filterwarnings("ignore:The nansum method is deprecated")
def test_nansum(self):
q1 = np.array([1.0, 2.0, np.nan]) * u.m
assert np.all(q1.nansum() == 3.0 * u.m)
assert np.all(np.nansum(q1) == 3.0 * u.m)
q2 = np.array([[np.nan, 5.0, 9.0], [1.0, np.nan, 1.0]]) * u.s
assert np.all(q2.nansum(0) == np.array([1.0, 5.0, 10.0]) * u.s)
assert np.all(np.nansum(q2, 0) == np.array([1.0, 5.0, 10.0]) * u.s)
@pytest.mark.filterwarnings("ignore:The nansum method is deprecated")
def test_nansum_inplace(self):
q1 = np.array([1.0, 2.0, np.nan]) * u.m
qi = 1.5 * u.s
qout = q1.nansum(out=qi)
assert qout is qi
assert qi == np.nansum(q1.value) * q1.unit
qi2 = 1.5 * u.s
qout2 = np.nansum(q1, out=qi2)
assert qout2 is qi2
assert qi2 == np.nansum(q1.value) * q1.unit
@pytest.mark.xfail(
NUMPY_LT_1_22, reason="'where' keyword argument not supported for numpy < 1.22"
)
@pytest.mark.filterwarnings("ignore:The nansum method is deprecated")
def test_nansum_where(self):
q1 = np.array([1.0, 2.0, np.nan, 4.0]) * u.m
initial = 0 * u.m
where = q1 < 4 * u.m
assert np.all(q1.nansum(initial=initial, where=where) == 3.0 * u.m)
assert np.all(np.nansum(q1, initial=initial, where=where) == 3.0 * u.m)
def test_prod(self):
q1 = np.array([1, 2, 6]) * u.m
with pytest.raises(u.UnitsError) as exc:
q1.prod()
with pytest.raises(u.UnitsError) as exc:
np.prod(q1)
q2 = np.array([3.0, 4.0, 5.0]) * u.Unit(1)
assert q2.prod() == 60.0 * u.Unit(1)
assert np.prod(q2) == 60.0 * u.Unit(1)
def test_cumprod(self):
q1 = np.array([1, 2, 6]) * u.m
with pytest.raises(u.UnitsError) as exc:
q1.cumprod()
with pytest.raises(u.UnitsError) as exc:
np.cumprod(q1)
q2 = np.array([3, 4, 5]) * u.Unit(1)
assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1))
assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1))
def test_diff(self):
q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
assert np.all(q1.diff() == np.array([1.0, 2.0, 6.0]) * u.m)
assert np.all(np.diff(q1) == np.array([1.0, 2.0, 6.0]) * u.m)
def test_ediff1d(self):
q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
assert np.all(q1.ediff1d() == np.array([1.0, 2.0, 6.0]) * u.m)
assert np.all(np.ediff1d(q1) == np.array([1.0, 2.0, 6.0]) * u.m)
def test_dot_meth(self):
q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
q2 = np.array([3.0, 4.0, 5.0, 6.0]) * u.s
q3 = q1.dot(q2)
assert q3.value == np.dot(q1.value, q2.value)
assert q3.unit == u.m * u.s
def test_trace_func(self):
q = np.array([[1.0, 2.0], [3.0, 4.0]]) * u.m
assert np.trace(q) == 5.0 * u.m
def test_trace_meth(self):
q1 = np.array([[1.0, 2.0], [3.0, 4.0]]) * u.m
assert q1.trace() == 5.0 * u.m
cont = u.Quantity(4.0, u.s)
q2 = np.array([[3.0, 4.0], [5.0, 6.0]]) * u.m
q2.trace(out=cont)
assert cont == 9.0 * u.m
def test_clip_func(self):
q = np.arange(10) * u.m
assert np.all(
np.clip(q, 3 * u.m, 6 * u.m)
== np.array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0]) * u.m
)
def test_clip_meth(self):
expected = np.array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0]) * u.m
q1 = np.arange(10) * u.m
q3 = q1.clip(3 * u.m, 6 * u.m)
        assert np.all(q3 == expected)
cont = np.zeros(10) * u.s
q1.clip(3 * u.m, 6 * u.m, out=cont)
assert np.all(cont == expected)
class TestArrayConversion:
"""
Test array conversion methods
"""
def test_item(self):
q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
assert q1.item(1) == 2 * q1.unit
q1.itemset(1, 1)
assert q1.item(1) == 1000 * u.m / u.km
q1.itemset(1, 100 * u.cm / u.km)
assert q1.item(1) == 1 * u.m / u.km
with pytest.raises(TypeError):
q1.itemset(1, 1.5 * u.m / u.km)
with pytest.raises(ValueError):
q1.itemset()
q1[1] = 1
assert q1[1] == 1000 * u.m / u.km
q1[1] = 100 * u.cm / u.km
assert q1[1] == 1 * u.m / u.km
with pytest.raises(TypeError):
q1[1] = 1.5 * u.m / u.km
def test_take_put(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
assert q1.take(1) == 2 * u.m / u.km
assert all(q1.take((0, 2)) == np.array([1, 3]) * u.m / u.km)
q1.put((1, 2), (3, 4))
assert np.all(q1.take((1, 2)) == np.array([3000, 4000]) * q1.unit)
q1.put(0, 500 * u.cm / u.km)
assert q1.item(0) == 5 * u.m / u.km
def test_slice(self):
"""Test that setitem changes the unit if needed (or ignores it for
values where that is allowed; viz., #2695)"""
q2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) * u.km / u.m
q1 = q2.copy()
q2[0, 0] = 10000.0
assert q2.unit == q1.unit
assert q2[0, 0].value == 10.0
q2[0] = 9.0 * u.Mm / u.km
assert all(q2.flatten()[:3].value == np.array([9.0, 9.0, 9.0]))
q2[0, :-1] = 8000.0
assert all(q2.flatten()[:3].value == np.array([8.0, 8.0, 9.0]))
with pytest.raises(u.UnitsError):
q2[1, 1] = 10 * u.s
        # just to be sure, repeat with a dimensionful unit
q3 = u.Quantity(np.arange(10.0), "m/s")
q3[5] = 100.0 * u.cm / u.s
assert q3[5].value == 1.0
# and check unit is ignored for 0, inf, nan, where that is reasonable
q3[5] = 0.0
assert q3[5] == 0.0
q3[5] = np.inf
assert np.isinf(q3[5])
q3[5] = np.nan
assert np.isnan(q3[5])
def test_fill(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q1.fill(2)
assert np.all(q1 == 2000 * u.m / u.km)
def test_repeat_compress_diagonal(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q2 = q1.repeat(2)
assert q2.unit == q1.unit
assert all(q2.value == q1.value.repeat(2))
q2.sort()
assert q2.unit == q1.unit
q2 = q1.compress(np.array([True, True, False, False]))
assert q2.unit == q1.unit
assert all(q2.value == q1.value.compress(np.array([True, True, False, False])))
q1 = np.array([[1, 2], [3, 4]]) * u.m / u.km
q2 = q1.diagonal()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.diagonal())
def test_view(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.view(np.ndarray)
assert not hasattr(q2, "unit")
q3 = q2.view(u.Quantity)
assert q3._unit is None
# MaskedArray copies and properties assigned in __dict__
q4 = np.ma.MaskedArray(q1)
assert q4._unit is q1._unit
q5 = q4.view(u.Quantity)
assert q5.unit is q1.unit
def test_slice_to_quantity(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2003
"""
a = np.random.uniform(size=(10, 8))
x, y, z = a[:, 1:4].T * u.km / u.s
total = np.sum(a[:, 1] * u.km / u.s - x)
assert isinstance(total, u.Quantity)
assert total == (0.0 * u.km / u.s)
def test_byte_type_view_field_changes(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.byteswap()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.byteswap())
q2 = q1.astype(np.float64)
assert all(q2 == q1)
assert q2.dtype == np.float64
q2a = q1.getfield(np.int32, offset=0)
q2b = q1.byteswap().getfield(np.int32, offset=4)
assert q2a.unit == q1.unit
assert all(q2b.byteswap() == q2a)
def test_sort(self):
q1 = np.array([1.0, 5.0, 2.0, 4.0]) * u.km / u.m
i = q1.argsort()
assert not hasattr(i, "unit")
q1.sort()
i = q1.searchsorted([1500, 2500])
assert not hasattr(i, "unit")
assert all(
i == q1.to(u.dimensionless_unscaled).value.searchsorted([1500, 2500])
)
def test_not_implemented(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
with pytest.raises(NotImplementedError):
q1.choose([0, 0, 1])
with pytest.raises(NotImplementedError):
q1.tolist()
with pytest.raises(NotImplementedError):
q1.tostring()
with pytest.raises(NotImplementedError):
q1.tobytes()
with pytest.raises(NotImplementedError):
q1.tofile(0)
with pytest.raises(NotImplementedError):
q1.dump("a.a")
with pytest.raises(NotImplementedError):
q1.dumps()
class TestRecArray:
"""Record arrays are not specifically supported, but we should not
prevent their use unnecessarily"""
def setup_method(self):
self.ra = (
np.array(np.arange(12.0).reshape(4, 3)).view(dtype="f8,f8,f8").squeeze()
)
def test_creation(self):
qra = u.Quantity(self.ra, u.m)
assert np.all(qra[:2].value == self.ra[:2])
def test_equality(self):
qra = u.Quantity(self.ra, u.m)
qra[1] = qra[2]
assert qra[1] == qra[2]
|
0a800faea5039750552079f4d032c82766ed678dc3aeebcc8dbed86ebfb4d5be | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
import itertools
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g / u.s**2, u.Jy)
class TestLogUnitCreation:
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize("lu_unit, lu_cls", zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize("lu_unit", lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize(
"lu_unit, physical_unit", itertools.product(lu_units, pu_sample)
)
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize(
"lu_cls, physical_unit",
itertools.product(lu_subclasses + [u.LogUnit], pu_sample),
)
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit, function_unit=2 * lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2 * lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_lshift_magnitude(self):
mag = 1.0 << u.ABmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.ABmag
assert mag.value == 1.0
# same test for an array, which should produce a view
a2 = np.arange(10.0)
q2 = a2 << u.ABmag
assert isinstance(q2, u.Magnitude)
assert q2.unit == u.ABmag
assert np.all(q2.value == a2)
a2[9] = 0.0
assert np.all(q2.value == a2)
# a different magnitude unit
mag = 10.0 << u.STmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.STmag
assert mag.value == 10.0
def test_ilshift_magnitude(self):
# test in-place operation and conversion
mag_fnu_cgs = u.mag(u.erg / u.s / u.cm**2 / u.Hz)
m = np.arange(10.0) * u.mag(u.Jy)
jy = m.physical
m2 = m << mag_fnu_cgs
assert np.all(m2 == m.to(mag_fnu_cgs))
m2 = m
m <<= mag_fnu_cgs
assert m is m2 # Check it was done in-place!
assert np.all(m.value == m2.value)
assert m.unit == mag_fnu_cgs
# Check it works if equivalencies are in-place.
with u.add_enabled_equivalencies(u.spectral_density(5500 * u.AA)):
st = jy.to(u.ST)
m <<= u.STmag
assert m is m2
assert_quantity_allclose(m.physical, st)
assert m.unit == u.STmag
def test_lshift_errors(self):
m = np.arange(10.0) * u.mag(u.Jy)
with pytest.raises(u.UnitsError):
m << u.STmag
with pytest.raises(u.UnitsError):
m << u.Jy
with pytest.raises(u.UnitsError):
m <<= u.STmag
with pytest.raises(u.UnitsError):
m <<= u.Jy
def test_predefined_magnitudes():
assert_quantity_allclose(
(-21.1 * u.STmag).physical, 1.0 * u.erg / u.cm**2 / u.s / u.AA
)
assert_quantity_allclose(
(-48.6 * u.ABmag).physical, 1.0 * u.erg / u.cm**2 / u.s / u.Hz
)
assert_quantity_allclose((0 * u.M_bol).physical, c.L_bol0)
assert_quantity_allclose(
(0 * u.m_bol).physical, c.L_bol0 / (4.0 * np.pi * (10.0 * c.pc) ** 2)
)
def test_predefined_reinitialisation():
assert u.mag("STflux") == u.STmag
assert u.mag("ABflux") == u.ABmag
assert u.mag("Bol") == u.M_bol
assert u.mag("bol") == u.m_bol
# required for backwards-compatibility, at least unless deprecated
assert u.mag("ST") == u.STmag
assert u.mag("AB") == u.ABmag
def test_predefined_string_roundtrip():
"""Ensure round-tripping; see #5015"""
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regression for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings:
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == "mag(Jy)"
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string("generic") == "mag(Jy)"
with pytest.raises(ValueError):
lu1.to_string("fits")
with pytest.raises(ValueError):
lu1.to_string(format="cds")
lu2 = u.dex()
assert str(lu2) == "dex"
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == "dex(1)"
lu3 = u.MagUnit(u.Jy, function_unit=2 * u.mag)
assert str(lu3) == "2 mag(Jy)"
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == "2 mag(Jy)"
lu4 = u.mag(u.ct)
assert lu4.to_string("generic") == "mag(ct)"
latex_str = r"$\mathrm{mag}$$\mathrm{\left( \mathrm{ct} \right)}$"
assert lu4.to_string("latex") == latex_str
assert lu4.to_string("latex_inline") == latex_str
assert lu4._repr_latex_() == latex_str
lu5 = u.mag(u.ct / u.s)
assert lu5.to_string("latex") == (
r"$\mathrm{mag}$$\mathrm{\left( " r"\mathrm{\frac{ct}{s}} \right)}$"
)
latex_str = r"$\mathrm{mag}$$\mathrm{\left( \mathrm{ct\,s^{-1}} " r"\right)}$"
assert lu5.to_string("latex_inline") == latex_str
class TestLogUnitConversion:
@pytest.mark.parametrize(
"lu_unit, physical_unit", itertools.product(lu_units, pu_sample)
)
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.0) == 1.0
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.0) == 0.0
pu = u.Unit(8.0 * physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.0) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0.0, atol=1.0e-15)
# Check we round-trip.
value = np.linspace(0.0, 10.0, 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.0e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize("lu_unit", lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0.0, 10.0, 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
"flu_unit, tlu_unit, physical_unit",
itertools.product(lu_units, lu_units, pu_sample),
)
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0.0, 10.0, 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(
flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)
)
tlu2 = tlu_unit(u.Unit(100.0 * physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.0e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.0e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
def test_magnitude_conversion_fails_message(self):
"""Check that "dimensionless" magnitude units include a message in their
exception text suggesting a possible cause of the problem.
"""
with pytest.raises(
u.UnitConversionError,
match="Did you perhaps subtract magnitudes so the unit got lost?",
):
(10 * u.ABmag - 2 * u.ABmag).to(u.nJy)
class TestLogUnitArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(
t.to(u.dimensionless_unscaled, np.arange(3.0) / 100.0),
lu2.to(lu2.physical_unit, np.arange(3.0)),
)
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
        # For completeness, also ensure nonsensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize("power", (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1**power == u.dimensionless_unscaled
elif power == 1:
assert lu1**power == lu1
else:
with pytest.raises(u.UnitsError):
lu1**power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t ** (1.0 / power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(
t2.to(u.dimensionless_unscaled, np.arange(3.0)),
lu2.to(lu2.physical_unit, np.arange(3.0)),
)
@pytest.mark.parametrize("other", pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.0
with pytest.raises(TypeError):
lu1 - [1.0, 2.0, 3.0]
@pytest.mark.parametrize(
"other",
(
u.mag,
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.Unit(2 * u.mag),
u.MagUnit("", 2.0 * u.mag),
),
)
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, "physical_unit", u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit("DM", 1.0 / (4.0 * np.pi * (10.0 * u.pc) ** 2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg / u.s / u.AA)
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm / u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation:
@pytest.mark.parametrize(
"lq, lu", zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])
)
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.0)) is lq
@pytest.mark.parametrize(
"lq_cls, physical_unit", itertools.product(lq_subclasses, pu_sample)
)
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and basic check on transformations"""
value = np.arange(1.0, 10.0)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
"unit",
(
u.mag,
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.Unit(2 * u.mag),
u.MagUnit("", 2.0 * u.mag),
u.MagUnit(u.Jy, -1 * u.mag),
u.MagUnit(u.m, -2.0 * u.mag),
),
)
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, "function_unit", unit)
assert q.unit.physical_unit is getattr(
unit, "physical_unit", u.dimensionless_unscaled
)
@pytest.mark.parametrize(
"value, unit",
(
(1.0 * u.mag(u.Jy), None),
(1.0 * u.dex(u.Jy), None),
(1.0 * u.mag(u.W / u.m**2 / u.Hz), u.mag(u.Jy)),
(1.0 * u.dex(u.W / u.m**2 / u.Hz), u.mag(u.Jy)),
),
)
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(
unit, "physical_unit", value.unit.physical_unit
)
@pytest.mark.parametrize(
"unit",
(
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.MagUnit("", 2.0 * u.mag),
u.MagUnit(u.Jy, -1 * u.mag),
u.MagUnit(u.m, -2.0 * u.mag),
),
)
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100.0 * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.0
assert (q2._function_view / u.mag).to_value(1) == -5.0
q3 = unit / 0.4
assert q3 == q1
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100.0, 1000.0] * u.cm / u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2.0, 3.0] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1.0, lu)
q = u.Quantity(1.0, lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10.0, 12.0, 14.0] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.0 * u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews:
def setup_method(self):
self.lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
self.lq2 = u.Magnitude(np.arange(1.0, 5.0))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.0
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2.0 * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing:
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1.0, 11.0) * u.Jy)
assert lq1[9] == u.Magnitude(10.0 * u.Jy)
lq1[2] = 100.0 * u.Jy
assert lq1[2] == u.Magnitude(100.0 * u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.0 * u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.0 * u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.0 * u.m)
assert lq1[2] == u.Magnitude(100.0 * u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
lq1[2:4] = 100.0 * u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.0 * u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.0 * u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.0 * u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.0 * u.m)
assert np.all(lq1[2] == u.Magnitude(100.0 * u.Jy))
class TestLogQuantityArithmetic:
@pytest.mark.parametrize(
"other",
[
2.4 * u.mag(),
12.34 * u.ABmag,
u.Magnitude(3.45 * u.Jy),
u.Dex(3.0),
u.Dex(np.linspace(3000, 5000, 10) * u.Angstrom),
u.Magnitude(6.78, 2.0 * u.mag),
],
)
@pytest.mark.parametrize("fac", [1.0, 2, 0.4])
def test_multiplication_division(self, other, fac):
"""Check that multiplication and division work as expected"""
lq_sf = fac * other
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other * fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other / fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
lq_sf = other.copy()
lq_sf *= fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other.copy()
lq_sf /= fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
def test_more_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this keeps
the result as a LogQuantity if possible."""
lq = u.Magnitude(np.arange(1.0, 11.0) * u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.0 * u.m)
with pytest.raises(u.UnitsError):
(1.0 * u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.0))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
lq_sf = lq.copy()
with pytest.raises(u.UnitsError):
lq_sf *= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
with pytest.raises(u.UnitsError):
lq_sf /= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.0)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value / 2.0)
# And multiplying with a dimensionless array is also OK.
r2 = lq2 * np.arange(10.0)
assert isinstance(r2, u.Magnitude)
assert np.all(r2 == lq2._function_view * np.arange(10.0))
# with dimensionless, normal units OK, but return normal quantities
# if the unit no longer is consistent with the logarithmic unit.
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.0 * u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view * 2)
@pytest.mark.parametrize("power", (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
if power == 0:
assert np.all(lq**power == 1.0)
elif power == 1:
assert np.all(lq**power == lq)
else:
with pytest.raises(u.UnitsError):
lq**power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.0))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.0)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit**power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
with pytest.raises(TypeError):
lq**lq
@pytest.mark.parametrize("other", pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
"other",
(
1.23 * u.mag,
2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy),
u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2 * u.mag),
u.Magnitude(6.78, 2.0 * u.mag),
),
)
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
other_physical = other.to(
getattr(other.unit, "physical_unit", u.dimensionless_unscaled),
equivalencies=u.logarithmic(),
)
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
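    # Illustrative sketch (not part of the original tests): adding a plain
    # magnitude offset multiplies the physical value, since m = -2.5 log10(f);
    # an offset of +5 mag therefore corresponds to a factor of 100 in flux.
    def _example_magnitude_offset(self):
        lq = u.Magnitude(10.0 * u.Jy)  # -2.5 mag(Jy)
        fainter = lq + 5.0 * u.mag  # 2.5 mag(Jy)
        assert fainter.unit.physical_unit == u.Jy
        assert_allclose(fainter.physical.value, 0.1)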
@pytest.mark.parametrize("other", pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1.0, 10.0), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1.0, 10.0))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1.0, 10.0))
assert lq1.unit == lu1
@pytest.mark.parametrize(
"other",
(
1.23 * u.mag,
2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy),
u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2 * u.mag),
u.Magnitude(6.78, 2.0 * u.mag),
),
)
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
other_physical = other.to(
getattr(other.unit, "physical_unit", u.dimensionless_unscaled),
equivalencies=u.logarithmic(),
)
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit("DM", 1.0 / (4.0 * np.pi * (10.0 * u.pc) ** 2))
DMmag = u.mag(dm0)
m_st = 10.0 * u.STmag
dm = 5.0 * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg / u.s / u.AA)
ratio = M_st.physical / (m_st.physical * 4.0 * np.pi * (100.0 * u.pc) ** 2)
assert np.abs(ratio - 1.0) < 1.0e-15
class TestLogQuantityComparisons:
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
with pytest.raises(TypeError):
lq > "a"
assert not (lq == "a")
assert lq != "a"
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
lq2 = u.Magnitude(2.0 * u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.0 * u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.0 * u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.0 * u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1.0, 4.0))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.0 * u.m
class TestLogQuantityMethods:
def setup_method(self):
self.mJy = np.arange(1.0, 5.0).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1.0, 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize(
"method",
(
"mean",
"min",
"max",
"round",
"trace",
"std",
"var",
"ptp",
"diff",
"ediff1d",
),
)
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value == getattr(mag._function_view, method)().value)
if method in ("std", "ptp", "diff", "ediff1d"):
assert res.unit == u.mag()
elif method == "var":
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(
mag.clip(2.0 * mag.unit, 4.0 * mag.unit).value
== mag.value.clip(2.0, 4.0)
)
@pytest.mark.parametrize("method", ("sum", "cumsum"))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value == getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value == self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize("method", ("prod", "cumprod"))
def test_never_ok(self, method):
with pytest.raises(TypeError):
getattr(self.mJy, method)()
with pytest.raises(TypeError):
getattr(self.m1, method)()
|
13246fd7beafa1a5908618361f644372178a8f8c09c3b6493f2e9c702753097b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
import copy
import decimal
import numbers
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from astropy import units as u
from astropy.units.quantity import _UNIT_NOT_INITIALISED
from astropy.utils import isiterable, minversion
from astropy.utils.exceptions import AstropyWarning
""" The Quantity class will represent a number + unit + uncertainty """
class TestQuantityCreation:
def test_1(self):
# create objects through operations with Unit objects:
quantity = 11.42 * u.meter # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = u.meter * 11.42 # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = 11.42 / u.meter
assert isinstance(quantity, u.Quantity)
quantity = u.meter / 11.42
assert isinstance(quantity, u.Quantity)
quantity = 11.42 * u.meter / u.second
assert isinstance(quantity, u.Quantity)
with pytest.raises(TypeError):
quantity = 182.234 + u.meter
with pytest.raises(TypeError):
quantity = 182.234 - u.meter
with pytest.raises(TypeError):
quantity = 182.234 % u.meter
def test_2(self):
# create objects using the Quantity constructor:
_ = u.Quantity(11.412, unit=u.meter)
_ = u.Quantity(21.52, "cm")
q3 = u.Quantity(11.412)
# By default quantities that don't specify a unit are unscaled
# dimensionless
assert q3.unit == u.Unit(1)
with pytest.raises(TypeError):
u.Quantity(object(), unit=u.m)
def test_3(self):
# with pytest.raises(u.UnitsError):
with pytest.raises(ValueError): # Until @mdboom fixes the errors in units
u.Quantity(11.412, unit="testingggg")
def test_nan_inf(self):
# Not-a-number
q = u.Quantity("nan", unit="cm")
assert np.isnan(q.value)
q = u.Quantity("NaN", unit="cm")
assert np.isnan(q.value)
q = u.Quantity("-nan", unit="cm") # float() allows this
assert np.isnan(q.value)
q = u.Quantity("nan cm")
assert np.isnan(q.value)
assert q.unit == u.cm
# Infinity
q = u.Quantity("inf", unit="cm")
assert np.isinf(q.value)
q = u.Quantity("-inf", unit="cm")
assert np.isinf(q.value)
q = u.Quantity("inf cm")
assert np.isinf(q.value)
assert q.unit == u.cm
q = u.Quantity("Infinity", unit="cm") # float() allows this
assert np.isinf(q.value)
# make sure these strings don't parse...
with pytest.raises(TypeError):
q = u.Quantity("", unit="cm")
with pytest.raises(TypeError):
q = u.Quantity("spam", unit="cm")
def test_unit_property(self):
# test getting and setting 'unit' attribute
q1 = u.Quantity(11.4, unit=u.meter)
with pytest.raises(AttributeError):
q1.unit = u.cm
def test_preserve_dtype(self):
"""Test that if an explicit dtype is given, it is used, while if not,
numbers are converted to float (including decimal.Decimal, which
numpy converts to an object; closes #1419)
"""
# If dtype is specified, use it, but if not, convert int, bool to float
q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)
assert q1.dtype == int
q2 = u.Quantity(q1)
assert q2.dtype == float
assert q2.value == float(q1.value)
assert q2.unit == q1.unit
# but we should preserve any float32 or even float16
a3_32 = np.array([1.0, 2.0], dtype=np.float32)
q3_32 = u.Quantity(a3_32, u.yr)
assert q3_32.dtype == a3_32.dtype
a3_16 = np.array([1.0, 2.0], dtype=np.float16)
q3_16 = u.Quantity(a3_16, u.yr)
assert q3_16.dtype == a3_16.dtype
# items stored as objects by numpy should be converted to float
# by default
q4 = u.Quantity(decimal.Decimal("10.25"), u.m)
assert q4.dtype == float
q5 = u.Quantity(decimal.Decimal("10.25"), u.m, dtype=object)
assert q5.dtype == object
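    # Illustrative sketch (not part of the original tests): without an explicit
    # dtype, integer and boolean inputs are upcast to float64, while a given
    # dtype is kept as-is.
    def _example_default_float_upcast(self):
        assert u.Quantity(3, u.m).dtype == np.float64
        assert u.Quantity(True, u.s).dtype == np.float64
        assert u.Quantity(3, u.m, dtype=np.int32).dtype == np.int32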
def test_numpy_style_dtype_inspect(self):
"""Test that if ``dtype=None``, NumPy's dtype inspection is used."""
q2 = u.Quantity(12, dtype=None)
assert np.issubdtype(q2.dtype, np.integer)
def test_float_dtype_promotion(self):
"""Test that if ``dtype=numpy.inexact``, the minimum precision is float64."""
q1 = u.Quantity(12, dtype=np.inexact)
assert not np.issubdtype(q1.dtype, np.integer)
assert q1.dtype == np.float64
q2 = u.Quantity(np.float64(12), dtype=np.inexact)
assert q2.dtype == np.float64
q3 = u.Quantity(np.float32(12), dtype=np.inexact)
assert q3.dtype == np.float32
if hasattr(np, "float16"):
q3 = u.Quantity(np.float16(12), dtype=np.inexact)
assert q3.dtype == np.float16
if hasattr(np, "float128"):
q4 = u.Quantity(np.float128(12), dtype=np.inexact)
assert q4.dtype == np.float128
def test_copy(self):
# By default, a new quantity is constructed, but not if copy=False
a = np.arange(10.0)
q0 = u.Quantity(a, unit=u.m / u.s)
assert q0.base is not a
q1 = u.Quantity(a, unit=u.m / u.s, copy=False)
assert q1.base is a
q2 = u.Quantity(q0)
assert q2 is not q0
assert q2.base is not q0.base
q2 = u.Quantity(q0, copy=False)
assert q2 is q0
assert q2.base is q0.base
q3 = u.Quantity(q0, q0.unit, copy=False)
assert q3 is q0
assert q3.base is q0.base
q4 = u.Quantity(q0, u.cm / u.s, copy=False)
assert q4 is not q0
assert q4.base is not q0.base
def test_subok(self):
"""Test subok can be used to keep class, or to insist on Quantity"""
class MyQuantitySubclass(u.Quantity):
pass
myq = MyQuantitySubclass(np.arange(10.0), u.m)
# try both with and without changing the unit
assert type(u.Quantity(myq)) is u.Quantity
assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass
assert type(u.Quantity(myq, u.km)) is u.Quantity
assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass
def test_order(self):
"""Test that order is correctly propagated to np.array"""
ac = np.array(np.arange(10.0), order="C")
qcc = u.Quantity(ac, u.m, order="C")
assert qcc.flags["C_CONTIGUOUS"]
qcf = u.Quantity(ac, u.m, order="F")
assert qcf.flags["F_CONTIGUOUS"]
qca = u.Quantity(ac, u.m, order="A")
assert qca.flags["C_CONTIGUOUS"]
# check it works also when passing in a quantity
assert u.Quantity(qcc, order="C").flags["C_CONTIGUOUS"]
assert u.Quantity(qcc, order="A").flags["C_CONTIGUOUS"]
assert u.Quantity(qcc, order="F").flags["F_CONTIGUOUS"]
af = np.array(np.arange(10.0), order="F")
qfc = u.Quantity(af, u.m, order="C")
assert qfc.flags["C_CONTIGUOUS"]
qff = u.Quantity(ac, u.m, order="F")
assert qff.flags["F_CONTIGUOUS"]
qfa = u.Quantity(af, u.m, order="A")
assert qfa.flags["F_CONTIGUOUS"]
assert u.Quantity(qff, order="C").flags["C_CONTIGUOUS"]
assert u.Quantity(qff, order="A").flags["F_CONTIGUOUS"]
assert u.Quantity(qff, order="F").flags["F_CONTIGUOUS"]
def test_ndmin(self):
"""Test that ndmin is correctly propagated to np.array"""
a = np.arange(10.0)
q1 = u.Quantity(a, u.m, ndmin=1)
assert q1.ndim == 1 and q1.shape == (10,)
q2 = u.Quantity(a, u.m, ndmin=2)
assert q2.ndim == 2 and q2.shape == (1, 10)
# check it works also when passing in a quantity
q3 = u.Quantity(q1, u.m, ndmin=3)
assert q3.ndim == 3 and q3.shape == (1, 1, 10)
# see github issue #10063
assert u.Quantity(u.Quantity(1, "m"), "m", ndmin=1).ndim == 1
assert u.Quantity(u.Quantity(1, "cm"), "m", ndmin=1).ndim == 1
def test_non_quantity_with_unit(self):
"""Test that unit attributes in objects get recognized."""
class MyQuantityLookalike(np.ndarray):
pass
a = np.arange(3.0)
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = "m"
q1 = u.Quantity(mylookalike)
assert isinstance(q1, u.Quantity)
assert q1.unit is u.m
assert np.all(q1.value == a)
q2 = u.Quantity(mylookalike, u.mm)
assert q2.unit is u.mm
assert np.all(q2.value == 1000.0 * a)
q3 = u.Quantity(mylookalike, copy=False)
assert np.all(q3.value == mylookalike)
q3[2] = 0
assert q3[2] == 0.0
assert mylookalike[2] == 0.0
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = u.m
q4 = u.Quantity(mylookalike, u.mm, copy=False)
q4[2] = 0
assert q4[2] == 0.0
assert mylookalike[2] == 2.0
mylookalike.unit = "nonsense"
with pytest.raises(TypeError):
u.Quantity(mylookalike)
def test_creation_via_view(self):
# This works but is no better than 1. * u.m
q1 = 1.0 << u.m
assert isinstance(q1, u.Quantity)
assert q1.unit == u.m
assert q1.value == 1.0
# With an array, we get an actual view.
a2 = np.arange(10.0)
q2 = a2 << u.m / u.s
assert isinstance(q2, u.Quantity)
assert q2.unit == u.m / u.s
assert np.all(q2.value == a2)
a2[9] = 0.0
assert np.all(q2.value == a2)
# But with a unit change we get a copy.
q3 = q2 << u.mm / u.s
assert isinstance(q3, u.Quantity)
assert q3.unit == u.mm / u.s
assert np.all(q3.value == a2 * 1000.0)
a2[8] = 0.0
assert q3[8].value == 8000.0
# Without a unit change, we do get a view.
q4 = q2 << q2.unit
a2[7] = 0.0
assert np.all(q4.value == a2)
with pytest.raises(u.UnitsError):
q2 << u.s
# But one can do an in-place unit change.
a2_copy = a2.copy()
q2 <<= u.mm / u.s
assert q2.unit == u.mm / u.s
# Of course, this changes a2 as well.
assert np.all(q2.value == a2)
# Sanity check on the values.
assert np.all(q2.value == a2_copy * 1000.0)
a2[8] = -1.0
# Using quantities, one can also work with strings.
q5 = q2 << "km/hr"
assert q5.unit == u.km / u.hr
assert np.all(q5 == q2)
# Finally, we can use scalar quantities as units.
not_quite_a_foot = 30.0 * u.cm
a6 = np.arange(5.0)
q6 = a6 << not_quite_a_foot
assert q6.unit == u.Unit(not_quite_a_foot)
assert np.all(q6.to_value(u.cm) == 30.0 * a6)
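    # Illustrative sketch (not part of the original tests): `array << unit`
    # returns a view that shares memory with the input array, whereas
    # `array * unit` makes a copy.
    def _example_view_versus_copy(self):
        a = np.arange(3.0)
        q_view = a << u.m
        q_copy = a * u.m
        a[0] = -1.0
        assert q_view[0].value == -1.0  # the view sees the change
        assert q_copy[0].value == 0.0  # the copy does not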
def test_rshift_warns(self):
with pytest.raises(TypeError), pytest.warns(
AstropyWarning, match="is not implemented"
) as warning_lines:
1 >> u.m
assert len(warning_lines) == 1
q = 1.0 * u.km
with pytest.raises(TypeError), pytest.warns(
AstropyWarning, match="is not implemented"
) as warning_lines:
q >> u.m
assert len(warning_lines) == 1
with pytest.raises(TypeError), pytest.warns(
AstropyWarning, match="is not implemented"
) as warning_lines:
q >>= u.m
assert len(warning_lines) == 1
with pytest.raises(TypeError), pytest.warns(
AstropyWarning, match="is not implemented"
) as warning_lines:
1.0 >> q
assert len(warning_lines) == 1
class TestQuantityOperations:
q1 = u.Quantity(11.42, u.meter)
q2 = u.Quantity(8.0, u.centimeter)
def test_addition(self):
# Take units from left object, q1
new_quantity = self.q1 + self.q2
assert new_quantity.value == 11.5
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 + self.q1
assert new_quantity.value == 1150.0
assert new_quantity.unit == u.centimeter
new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
assert new_q.unit == u.m
assert new_q.value == 15000.1
def test_subtraction(self):
# Take units from left object, q1
new_quantity = self.q1 - self.q2
assert new_quantity.value == 11.34
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 - self.q1
assert new_quantity.value == -1134.0
assert new_quantity.unit == u.centimeter
def test_multiplication(self):
# Take units from left object, q1
new_quantity = self.q1 * self.q2
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.meter * u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 * self.q1
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.centimeter * u.meter)
# Multiply with a number
new_quantity = 15.0 * self.q1
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiply with a number
new_quantity = self.q1 * 15.0
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiple with a unit.
new_quantity = self.q1 * u.s
assert new_quantity.value == 11.42
assert new_quantity.unit == u.Unit("m s")
# Reverse multiple with a unit.
new_quantity = u.s * self.q1
assert new_quantity.value == 11.42
assert new_quantity.unit == u.Unit("m s")
def test_division(self):
# Take units from left object, q1
new_quantity = self.q1 / self.q2
assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
assert new_quantity.unit == (u.meter / u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 / self.q1
assert_array_almost_equal(new_quantity.value, 0.70052539404553416, decimal=16)
assert new_quantity.unit == (u.centimeter / u.meter)
q1 = u.Quantity(11.4, unit=u.meter)
q2 = u.Quantity(10.0, unit=u.second)
new_quantity = q1 / q2
assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
assert new_quantity.unit == (u.meter / u.second)
# divide with a number
new_quantity = self.q1 / 10.0
assert new_quantity.value == 1.142
assert new_quantity.unit == u.meter
# divide with a number
new_quantity = 11.42 / self.q1
assert new_quantity.value == 1.0
assert new_quantity.unit == u.Unit("1/m")
# Divide by a unit.
new_quantity = self.q1 / u.s
assert new_quantity.value == 11.42
assert new_quantity.unit == u.Unit("m/s")
# Divide into a unit.
new_quantity = u.s / self.q1
assert new_quantity.value == 1 / 11.42
assert new_quantity.unit == u.Unit("s/m")
def test_commutativity(self):
"""Regression test for issue #587."""
new_q = u.Quantity(11.42, "m*s")
assert self.q1 * u.s == u.s * self.q1 == new_q
assert self.q1 / u.s == u.Quantity(11.42, "m/s")
assert u.s / self.q1 == u.Quantity(1 / 11.42, "s/m")
def test_power(self):
# raise quantity to a power
new_quantity = self.q1**2
assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
assert new_quantity.unit == u.Unit("m^2")
new_quantity = self.q1**3
assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
assert new_quantity.unit == u.Unit("m^3")
def test_matrix_multiplication(self):
a = np.eye(3)
q = a * u.m
result1 = q @ a
assert np.all(result1 == q)
result2 = a @ q
assert np.all(result2 == q)
result3 = q @ q
assert np.all(result3 == a * u.m**2)
q2 = np.array(
[[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]
) / u.s # fmt: skip
result4 = q @ q2
assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)
def test_unary(self):
# Test the minus unary operator
new_quantity = -self.q1
assert new_quantity.value == -self.q1.value
assert new_quantity.unit == self.q1.unit
new_quantity = -(-self.q1)
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
# Test the plus unary operator
new_quantity = +self.q1
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
def test_abs(self):
q = 1.0 * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == q.value
assert new_quantity.unit == q.unit
q = -1.0 * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == -q.value
assert new_quantity.unit == q.unit
def test_incompatible_units(self):
"""When trying to add or subtract units that aren't compatible, throw an error"""
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, unit=u.second)
with pytest.raises(u.UnitsError):
q1 + q2
def test_non_number_type(self):
q1 = u.Quantity(11.412, unit=u.meter)
with pytest.raises(
TypeError, match=r"Unsupported operand type\(s\) for ufunc .*"
):
q1 + {"a": 1}
with pytest.raises(TypeError):
q1 + u.meter
def test_dimensionless_operations(self):
# test conversion to dimensionless
dq = 3.0 * u.m / u.km
dq1 = dq + 1.0 * u.mm / u.km
assert dq1.value == 3.001
assert dq1.unit == dq.unit
dq2 = dq + 1.0
assert dq2.value == 1.003
assert dq2.unit == u.dimensionless_unscaled
        # check that adding or subtracting a dimensionless Quantity
        # to/from a dimensionful one does not work
with pytest.raises(u.UnitsError):
self.q1 + u.Quantity(0.1, unit=u.Unit(""))
with pytest.raises(u.UnitsError):
self.q1 - u.Quantity(0.1, unit=u.Unit(""))
# and test that scaling of integers works
q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
q2 = q + np.array([4, 5, 6])
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
# but not if doing it inplace
with pytest.raises(TypeError):
q += np.array([1, 2, 3])
# except if it is actually possible
q = np.array([1, 2, 3]) * u.km / u.m
q += np.array([4, 5, 6])
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == np.array([1004, 2005, 3006]))
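    # Illustrative sketch (not part of the original tests): a ratio of
    # compatible units keeps its composite unit until it is decomposed, at
    # which point the scale folds into the value.
    def _example_decompose_to_dimensionless(self):
        ratio = (3.0 * u.m) / (1.0 * u.km)
        assert ratio.unit == u.m / u.km
        decomposed = ratio.decompose()
        assert decomposed.unit == u.dimensionless_unscaled
        assert_allclose(decomposed.value, 0.003)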
def test_complicated_operation(self):
"""Perform a more complicated test"""
from astropy.units import imperial
# Multiple units
distance = u.Quantity(15.0, u.meter)
time = u.Quantity(11.0, u.second)
velocity = (distance / time).to(imperial.mile / u.hour)
assert_array_almost_equal(velocity.value, 3.05037, decimal=5)
G = u.Quantity(6.673e-11, u.m**3 / u.kg / u.s**2)
_ = (1.0 / (4.0 * np.pi * G)).to(u.pc**-3 / u.s**-2 * u.kg)
# Area
side1 = u.Quantity(11.0, u.centimeter)
side2 = u.Quantity(7.0, u.centimeter)
area = side1 * side2
assert_array_almost_equal(area.value, 77.0, decimal=15)
assert area.unit == u.cm * u.cm
def test_comparison(self):
# equality/ non-equality is straightforward for quantity objects
assert (1 / (u.cm * u.cm)) == 1 * u.cm**-2
assert 1 * u.m == 100 * u.cm
assert 1 * u.m != 1 * u.cm
        # when one side is a Unit, Quantity returns NotImplemented,
        # but Unit handles the comparison, so it still works
unit = u.cm**3
q = 1.0 * unit
assert q.__eq__(unit) is NotImplemented
assert unit.__eq__(q) is True
assert q == unit
q = 1000.0 * u.mm**3
assert q == unit
# mismatched types should never work
assert not 1.0 * u.cm == 1.0
assert 1.0 * u.cm != 1.0
for quantity in (1.0 * u.cm, 1.0 * u.dimensionless_unscaled):
with pytest.raises(ValueError, match="ambiguous"):
bool(quantity)
def test_numeric_converters(self):
# float, int, long, and __index__ should only work for single
# quantities, of appropriate type, and only if they are dimensionless.
# for index, this should be unscaled as well
# (Check on __index__ is also a regression test for #1557)
# quantities with units should never convert, or be usable as an index
q1 = u.Quantity(1, u.m)
converter_err_msg = (
"only dimensionless scalar quantities can be converted to Python scalars"
)
index_err_msg = (
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
with pytest.raises(TypeError) as exc:
float(q1)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q1)
assert exc.value.args[0] == converter_err_msg
        # We used to test `q1 * ['a', 'b', 'c']` here, but that it worked
# at all was a really odd confluence of bugs. Since it doesn't work
# in numpy >=1.10 any more, just go directly for `__index__` (which
# makes the test more similar to the `int`, `long`, etc., tests).
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless but scaled is OK, however
q2 = u.Quantity(1.23, u.m / u.km)
assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))
assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))
with pytest.raises(TypeError) as exc:
q2.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless unscaled is OK, though for index needs to be int
q3 = u.Quantity(1.23, u.dimensionless_unscaled)
assert float(q3) == 1.23
assert int(q3) == 1
with pytest.raises(TypeError) as exc:
q3.__index__()
assert exc.value.args[0] == index_err_msg
# integer dimensionless unscaled is good for all
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert float(q4) == 2.0
assert int(q4) == 2
assert q4.__index__() == 2
# but arrays are not OK
q5 = u.Quantity([1, 2], u.m)
with pytest.raises(TypeError) as exc:
float(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
q5.__index__()
assert exc.value.args[0] == index_err_msg
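    # Illustrative sketch (not part of the original tests): because __index__
    # is defined for integer, dimensionless, unscaled quantities, they can be
    # used directly as sequence indices.
    def _example_quantity_as_sequence_index(self):
        idx = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
        assert ["a", "b", "c"][idx] == "c"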
# See https://github.com/numpy/numpy/issues/5074
# It seems unlikely this will be resolved, so xfail'ing it.
@pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10")
def test_numeric_converter_to_index_in_practice(self):
"""Test that use of __index__ actually works."""
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert q4 * ["a", "b", "c"] == ["a", "b", "c", "a", "b", "c"]
def test_array_converters(self):
# Scalar quantity
q = u.Quantity(1.23, u.m)
assert np.all(np.array(q) == np.array([1.23]))
# Array quantity
q = u.Quantity([1.0, 2.0, 3.0], u.m)
assert np.all(np.array(q) == np.array([1.0, 2.0, 3.0]))
def test_quantity_conversion():
q1 = u.Quantity(0.1, unit=u.meter)
value = q1.value
assert value == 0.1
value_in_km = q1.to_value(u.kilometer)
assert value_in_km == 0.0001
new_quantity = q1.to(u.kilometer)
assert new_quantity.value == 0.0001
with pytest.raises(u.UnitsError):
q1.to(u.zettastokes)
with pytest.raises(u.UnitsError):
q1.to_value(u.zettastokes)
def test_quantity_ilshift(): # in-place conversion
q = u.Quantity(10, unit=u.one)
# Incompatible units. This goes through ilshift and hits a
# UnitConversionError first in ilshift, then in the unit's rlshift.
with pytest.raises(u.UnitConversionError):
q <<= u.rad
# unless the equivalency is enabled
with u.add_enabled_equivalencies(u.dimensionless_angles()):
q <<= u.rad
assert np.isclose(q, 10 * u.rad)
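# Illustrative sketch (not part of the original tests): for compatible units,
# `<<=` converts the data in place, so the object identity is preserved.
def _example_inplace_unit_conversion():
    q = np.arange(3.0) * u.km
    q_id = id(q)
    q <<= u.m
    assert id(q) == q_id
    assert q.unit is u.m
    assert np.all(q.value == [0.0, 1000.0, 2000.0])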
def test_regression_12964():
# This will fail if the fix to
# https://github.com/astropy/astropy/issues/12964 doesn't work.
x = u.Quantity(10, u.km, dtype=int)
x <<= u.pc
# We add a test that this worked.
assert x.unit is u.pc
assert x.dtype == np.float64
def test_quantity_value_views():
q1 = u.Quantity([1.0, 2.0], unit=u.meter)
# views if the unit is the same.
v1 = q1.value
v1[0] = 0.0
assert np.all(q1 == [0.0, 2.0] * u.meter)
v2 = q1.to_value()
v2[1] = 3.0
assert np.all(q1 == [0.0, 3.0] * u.meter)
v3 = q1.to_value("m")
v3[0] = 1.0
assert np.all(q1 == [1.0, 3.0] * u.meter)
q2 = q1.to("m", copy=False)
q2[0] = 2 * u.meter
assert np.all(q1 == [2.0, 3.0] * u.meter)
v4 = q1.to_value("cm")
v4[0] = 0.0
# copy if different unit.
assert np.all(q1 == [2.0, 3.0] * u.meter)
def test_quantity_conversion_with_equiv():
q1 = u.Quantity(0.1, unit=u.meter)
v2 = q1.to_value(u.Hz, equivalencies=u.spectral())
assert_allclose(v2, 2997924580.0)
q2 = q1.to(u.Hz, equivalencies=u.spectral())
assert_allclose(q2.value, v2)
q1 = u.Quantity(0.4, unit=u.arcsecond)
v2 = q1.to_value(u.au, equivalencies=u.parallax())
q2 = q1.to(u.au, equivalencies=u.parallax())
v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())
q3 = q2.to(u.arcminute, equivalencies=u.parallax())
assert_allclose(v2, 515662.015)
assert_allclose(q2.value, v2)
assert q2.unit == u.au
assert_allclose(v3, 0.0066666667)
assert_allclose(q3.value, v3)
assert q3.unit == u.arcminute
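# Illustrative sketch (not part of the original tests): the spectral
# equivalency converts between wavelength and frequency via c = lambda * nu.
def _example_spectral_equivalency():
    wav = 500.0 * u.nm
    freq = wav.to(u.THz, equivalencies=u.spectral())
    assert_allclose(freq.value, 299792458.0 / 500e-9 / 1e12)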
def test_quantity_conversion_equivalency_passed_on():
class MySpectral(u.Quantity):
_equivalencies = u.spectral()
def __quantity_view__(self, obj, unit):
return obj.view(MySpectral)
def __quantity_instance__(self, *args, **kwargs):
return MySpectral(*args, **kwargs)
q1 = MySpectral([1000, 2000], unit=u.Hz)
q2 = q1.to(u.nm)
assert q2.unit == u.nm
q3 = q2.to(u.Hz)
assert q3.unit == u.Hz
assert_allclose(q3.value, q1.value)
q4 = MySpectral([1000, 2000], unit=u.nm)
q5 = q4.to(u.Hz).to(u.nm)
assert q5.unit == u.nm
assert_allclose(q4.value, q5.value)
# Regression test for issue #2315, divide-by-zero error when examining 0*unit
def test_self_equivalency():
assert u.deg.is_equivalent(0 * u.radian)
assert u.deg.is_equivalent(1 * u.radian)
def test_si():
q1 = 10.0 * u.m * u.s**2 / (200.0 * u.ms) ** 2 # 250 meters
assert q1.si.value == 250
assert q1.si.unit == u.m
q = 10.0 * u.m # 10 meters
assert q.si.value == 10
assert q.si.unit == u.m
q = 10.0 / u.m # 10 1 / meters
assert q.si.value == 10
assert q.si.unit == (1 / u.m)
def test_cgs():
q1 = 10.0 * u.cm * u.s**2 / (200.0 * u.ms) ** 2 # 250 centimeters
assert q1.cgs.value == 250
assert q1.cgs.unit == u.cm
    q = 10.0 * u.m  # 10 meters (= 1000 cm)
assert q.cgs.value == 1000
assert q.cgs.unit == u.cm
q = 10.0 / u.cm # 10 1 / centimeters
assert q.cgs.value == 10
assert q.cgs.unit == (1 / u.cm)
q = 10.0 * u.Pa # 10 pascals
assert q.cgs.value == 100
assert q.cgs.unit == u.barye
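# Illustrative sketch (not part of the original tests): `.si` both decomposes
# the unit into SI bases and folds the scale factor into the value.
def _example_si_folds_scale():
    q = 36.0 * u.km / u.hr
    assert_allclose(q.si.value, 10.0)
    assert q.si.unit == u.m / u.s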
class TestQuantityComparison:
def test_quantity_equality(self):
assert u.Quantity(1000, unit="m") == u.Quantity(1, unit="km")
assert not (u.Quantity(1, unit="m") == u.Quantity(1, unit="km"))
# for ==, !=, return False, True if units do not match
assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True
assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False
assert (u.Quantity(0, unit=u.m) == u.Quantity(0, unit=u.s)) is False
# But allow comparison with 0, +/-inf if latter unitless
assert u.Quantity(0, u.m) == 0.0
assert u.Quantity(1, u.m) != 0.0
assert u.Quantity(1, u.m) != np.inf
assert u.Quantity(np.inf, u.m) == np.inf
def test_quantity_equality_array(self):
a = u.Quantity([0.0, 1.0, 1000.0], u.m)
b = u.Quantity(1.0, u.km)
eq = a == b
ne = a != b
assert np.all(eq == [False, False, True])
assert np.all(eq != ne)
# For mismatched units, we should just get True, False
c = u.Quantity(1.0, u.s)
eq = a == c
ne = a != c
assert eq is False
assert ne is True
# Constants are treated as dimensionless, so False too.
eq = a == 1.0
ne = a != 1.0
assert eq is False
assert ne is True
# But 0 can have any units, so we can compare.
eq = a == 0
ne = a != 0
assert np.all(eq == [True, False, False])
assert np.all(eq != ne)
# But we do not extend that to arrays; they should have the same unit.
d = np.array([0, 1.0, 1000.0])
eq = a == d
ne = a != d
assert eq is False
assert ne is True
def test_quantity_comparison(self):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)
assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)
class TestQuantityDisplay:
scalarintq = u.Quantity(1, unit="m", dtype=int)
scalarfloatq = u.Quantity(1.3, unit="m")
arrq = u.Quantity([1, 2.3, 8.9], unit="m")
scalar_complex_q = u.Quantity(complex(1.0, 2.0))
scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)
scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)
arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))
big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))
def test_dimensionless_quantity_repr(self):
q2 = u.Quantity(1.0, unit="m-1")
q3 = u.Quantity(1, unit="m-1", dtype=int)
assert repr(self.scalarintq * q2) == "<Quantity 1.>"
assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>"
assert repr(self.scalarintq * q3) == "<Quantity 1>"
def test_dimensionless_quantity_str(self):
q2 = u.Quantity(1.0, unit="m-1")
q3 = u.Quantity(1, unit="m-1", dtype=int)
assert str(self.scalarintq * q2) == "1.0"
assert str(self.scalarintq * q3) == "1"
assert str(self.arrq * q2) == "[1. 2.3 8.9]"
def test_dimensionless_quantity_format(self):
q1 = u.Quantity(3.14)
assert format(q1, ".2f") == "3.14"
assert f"{q1:cds}" == "3.14"
def test_scalar_quantity_str(self):
assert str(self.scalarintq) == "1 m"
assert str(self.scalarfloatq) == "1.3 m"
def test_scalar_quantity_repr(self):
assert repr(self.scalarintq) == "<Quantity 1 m>"
assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
def test_array_quantity_str(self):
assert str(self.arrq) == "[1. 2.3 8.9] m"
def test_array_quantity_repr(self):
assert repr(self.arrq) == "<Quantity [1. , 2.3, 8.9] m>"
def test_scalar_quantity_format(self):
assert format(self.scalarintq, "02d") == "01 m"
assert format(self.scalarfloatq, ".1f") == "1.3 m"
assert format(self.scalarfloatq, ".0f") == "1 m"
assert f"{self.scalarintq:cds}" == "1 m"
assert f"{self.scalarfloatq:cds}" == "1.3 m"
def test_uninitialized_unit_format(self):
bad_quantity = np.arange(10.0).view(u.Quantity)
assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + ">")
def test_to_string(self):
qscalar = u.Quantity(1.5e14, "m/s")
# __str__ is the default `format`
assert str(qscalar) == qscalar.to_string()
res = "Quantity as KMS: 150000000000.0 km / s"
assert f"Quantity as KMS: {qscalar.to_string(unit=u.km / u.s)}" == res
# With precision set
res = "Quantity as KMS: 1.500e+11 km / s"
assert (
f"Quantity as KMS: {qscalar.to_string(precision=3, unit=u.km / u.s)}" == res
)
res = r"$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
assert qscalar.to_string(format="latex") == res
assert qscalar.to_string(format="latex", subfmt="inline") == res
res = r"$\displaystyle 1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
assert qscalar.to_string(format="latex", subfmt="display") == res
res = r"$1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$"
assert qscalar.to_string(format="latex_inline") == res
assert qscalar.to_string(format="latex_inline", subfmt="inline") == res
res = r"$\displaystyle 1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$"
assert qscalar.to_string(format="latex_inline", subfmt="display") == res
res = "[0 1 2] (Unit not initialised)"
assert np.arange(3).view(u.Quantity).to_string() == res
def test_repr_latex(self):
from astropy.units.quantity import conf
q2scalar = u.Quantity(1.5e14, "m/s")
assert self.scalarintq._repr_latex_() == r"$1 \; \mathrm{m}$"
assert self.scalarfloatq._repr_latex_() == r"$1.3 \; \mathrm{m}$"
assert (
q2scalar._repr_latex_() == r"$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
)
assert self.arrq._repr_latex_() == r"$[1,~2.3,~8.9] \; \mathrm{m}$"
# Complex quantities
assert self.scalar_complex_q._repr_latex_() == r"$(1+2i) \; \mathrm{}$"
assert (
self.scalar_big_complex_q._repr_latex_()
== r"$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$"
)
assert (
self.scalar_big_neg_complex_q._repr_latex_()
== r"$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$"
)
assert self.arr_complex_q._repr_latex_() == (
r"$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),"
r"~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$"
)
assert r"\dots" in self.big_arr_complex_q._repr_latex_()
qmed = np.arange(100) * u.m
qbig = np.arange(1000) * u.m
qvbig = np.arange(10000) * 1e9 * u.m
pops = np.get_printoptions()
oldlat = conf.latex_array_threshold
try:
# check precision behavior
q = u.Quantity(987654321.123456789, "m/s")
qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
np.set_printoptions(precision=8)
assert (
q._repr_latex_() == r"$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$"
)
assert (
qa._repr_latex_()
== r"$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$"
)
np.set_printoptions(precision=2)
assert q._repr_latex_() == r"$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$"
assert qa._repr_latex_() == r"$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$"
# check thresholding behavior
conf.latex_array_threshold = 100 # should be default
lsmed = qmed._repr_latex_()
assert r"\dots" not in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
conf.latex_array_threshold = 1001
lsmed = qmed._repr_latex_()
assert r"\dots" not in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" not in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
conf.latex_array_threshold = -1 # means use the numpy threshold
np.set_printoptions(threshold=99)
lsmed = qmed._repr_latex_()
assert r"\dots" in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
assert lsvbig.endswith(",~1 \\times 10^{13}] \\; \\mathrm{m}$")
finally:
# prevent side-effects from influencing other tests
np.set_printoptions(**pops)
conf.latex_array_threshold = oldlat
qinfnan = [np.inf, -np.inf, np.nan] * u.m
assert qinfnan._repr_latex_() == r"$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$"
def test_decompose():
q1 = 5 * u.N
assert q1.decompose() == (5 * u.kg * u.m * u.s**-2)
def test_decompose_regression():
"""
Regression test for bug #1163
If decompose was called multiple times on a Quantity with an array and a
scale != 1, the result changed every time. This is because the value was
being referenced not copied, then modified, which changed the original
value.
"""
q = np.array([1, 2, 3]) * u.m / (2.0 * u.km)
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
assert np.all(q == np.array([1, 2, 3]) * u.m / (2.0 * u.km))
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
def test_arrays():
"""
    Test using quantities with array values
"""
qsec = u.Quantity(np.arange(10), u.second)
assert isinstance(qsec.value, np.ndarray)
assert not qsec.isscalar
# len and indexing should work for arrays
assert len(qsec) == len(qsec.value)
qsecsub25 = qsec[2:5]
assert qsecsub25.unit == qsec.unit
assert isinstance(qsecsub25, u.Quantity)
assert len(qsecsub25) == 3
# make sure isscalar, len, and indexing behave correctly for non-arrays.
qsecnotarray = u.Quantity(10.0, u.second)
assert qsecnotarray.isscalar
with pytest.raises(TypeError):
len(qsecnotarray)
with pytest.raises(TypeError):
qsecnotarray[0]
qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
# 0d numpy array should act basically like a scalar
assert qseclen0array.isscalar
with pytest.raises(TypeError):
len(qseclen0array)
with pytest.raises(TypeError):
qseclen0array[0]
assert isinstance(qseclen0array.value, numbers.Integral)
a = np.array(
[(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)],
dtype=[("x", float), ("y", float), ("z", float)],
)
qkpc = u.Quantity(a, u.kpc)
assert not qkpc.isscalar
qkpc0 = qkpc[0]
assert qkpc0.value == a[0]
assert qkpc0.unit == qkpc.unit
assert isinstance(qkpc0, u.Quantity)
assert qkpc0.isscalar
qkpcx = qkpc["x"]
assert np.all(qkpcx.value == a["x"])
assert qkpcx.unit == qkpc.unit
assert isinstance(qkpcx, u.Quantity)
assert not qkpcx.isscalar
qkpcx1 = qkpc["x"][1]
assert qkpcx1.unit == qkpc.unit
assert isinstance(qkpcx1, u.Quantity)
assert qkpcx1.isscalar
qkpc1x = qkpc[1]["x"]
assert qkpc1x.isscalar
assert qkpc1x == qkpcx1
# can also create from lists, will auto-convert to arrays
qsec = u.Quantity(list(range(10)), u.second)
assert isinstance(qsec.value, np.ndarray)
# quantity math should work with arrays
assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
# quantity addition/subtraction should *not* work with arrays b/c unit
# ambiguous
with pytest.raises(u.UnitsError):
assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
with pytest.raises(u.UnitsError):
assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
# should create by unit multiplication, too
qsec2 = np.arange(10) * u.second
qsec3 = u.second * np.arange(10)
assert np.all(qsec == qsec2)
assert np.all(qsec2 == qsec3)
# make sure numerical-converters fail when arrays are present
with pytest.raises(TypeError):
float(qsec)
with pytest.raises(TypeError):
int(qsec)
def test_array_indexing_slicing():
q = np.array([1.0, 2.0, 3.0]) * u.m
assert q[0] == 1.0 * u.m
assert np.all(q[0:2] == u.Quantity([1.0, 2.0], u.m))
def test_array_setslice():
q = np.array([1.0, 2.0, 3.0]) * u.m
q[1:2] = np.array([400.0]) * u.cm
assert np.all(q == np.array([1.0, 4.0, 3.0]) * u.m)
def test_inverse_quantity():
"""
Regression test from issue #679
"""
q = u.Quantity(4.0, u.meter / u.second)
qot = q / 2
toq = 2 / q
npqot = q / np.array(2)
assert npqot.value == 2.0
assert npqot.unit == (u.meter / u.second)
assert qot.value == 2.0
assert qot.unit == (u.meter / u.second)
assert toq.value == 0.5
assert toq.unit == (u.second / u.meter)
def test_quantity_mutability():
q = u.Quantity(9.8, u.meter / u.second / u.second)
with pytest.raises(AttributeError):
q.value = 3
with pytest.raises(AttributeError):
q.unit = u.kg
def test_quantity_initialized_with_quantity():
q1 = u.Quantity(60, u.second)
q2 = u.Quantity(q1, u.minute)
assert q2.value == 1
q3 = u.Quantity([q1, q2], u.second)
assert q3[0].value == 60
assert q3[1].value == 60
q4 = u.Quantity([q2, q1])
assert q4.unit == q2.unit
assert q4[0].value == 1
assert q4[1].value == 1
def test_quantity_string_unit():
q1 = 1.0 * u.m / "s"
assert q1.value == 1
assert q1.unit == (u.m / u.s)
q2 = q1 * "m"
assert q2.unit == ((u.m * u.m) / u.s)
def test_quantity_invalid_unit_string():
with pytest.raises(ValueError):
"foo" * u.m
def test_implicit_conversion():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
assert_allclose(q.centimeter, 100)
assert_allclose(q.cm, 100)
assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
q.foo = 42
attrs = dir(q)
assert "centimeter" in attrs
assert "cm" in attrs
assert "parsec" in attrs
assert "foo" in attrs
assert "to" in attrs
assert "value" in attrs
# Something from the base class, object
assert "__setattr__" in attrs
with pytest.raises(AttributeError):
q.l
def test_quantity_iterability():
    """Regression test for issue #878.
Scalar quantities should not be iterable and should raise a type error on
iteration.
"""
q1 = [15.0, 17.0] * u.m
assert isiterable(q1)
q2 = next(iter(q1))
assert q2 == 15.0 * u.m
assert not isiterable(q2)
pytest.raises(TypeError, iter, q2)
def test_copy():
q1 = u.Quantity(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), unit=u.m)
q2 = q1.copy()
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
q3 = q1.copy(order="F")
assert q3.flags["F_CONTIGUOUS"]
assert np.all(q1.value == q3.value)
assert q1.unit == q3.unit
assert q1.dtype == q3.dtype
assert q1.value is not q3.value
q4 = q1.copy(order="C")
assert q4.flags["C_CONTIGUOUS"]
assert np.all(q1.value == q4.value)
assert q1.unit == q4.unit
assert q1.dtype == q4.dtype
assert q1.value is not q4.value
def test_deepcopy():
q1 = u.Quantity(np.array([1.0, 2.0, 3.0]), unit=u.m)
q2 = copy.deepcopy(q1)
assert isinstance(q2, u.Quantity)
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
def test_equality_numpy_scalar():
"""
A regression test to ensure that numpy scalars are correctly compared
(which originally failed due to the lack of ``__array_priority__``).
"""
assert 10 != 10.0 * u.m
assert np.int64(10) != 10 * u.m
assert 10 * u.m != np.int64(10)
def test_quantity_pickleability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
def test_quantity_initialisation_from_string():
q = u.Quantity("1")
assert q.unit == u.dimensionless_unscaled
assert q.value == 1.0
q = u.Quantity("1.5 m/s")
assert q.unit == u.m / u.s
assert q.value == 1.5
assert u.Unit(q) == u.Unit("1.5 m/s")
q = u.Quantity(".5 m")
assert q == u.Quantity(0.5, u.m)
q = u.Quantity("-1e1km")
assert q == u.Quantity(-10, u.km)
q = u.Quantity("-1e+1km")
assert q == u.Quantity(-10, u.km)
q = u.Quantity("+.5km")
assert q == u.Quantity(0.5, u.km)
q = u.Quantity("+5e-1km")
assert q == u.Quantity(0.5, u.km)
q = u.Quantity("5", u.m)
assert q == u.Quantity(5.0, u.m)
q = u.Quantity("5 km", u.m)
assert q.value == 5000.0
assert q.unit == u.m
q = u.Quantity("5Em")
assert q == u.Quantity(5.0, u.Em)
with pytest.raises(TypeError):
u.Quantity("")
with pytest.raises(TypeError):
u.Quantity("m")
with pytest.raises(TypeError):
u.Quantity("1.2.3 deg")
with pytest.raises(TypeError):
u.Quantity("1+deg")
with pytest.raises(TypeError):
u.Quantity("1-2deg")
with pytest.raises(TypeError):
u.Quantity("1.2e-13.3m")
with pytest.raises(TypeError):
u.Quantity(["5"])
with pytest.raises(TypeError):
u.Quantity(np.array(["5"]))
with pytest.raises(ValueError):
u.Quantity("5E")
with pytest.raises(ValueError):
u.Quantity("5 foo")
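# Illustrative sketch (not part of the original tests): the string parser
# accepts an optional sign, a bare decimal point and an exponent, so these
# spellings all yield the same Quantity.
def _example_equivalent_string_spellings():
    expected = u.Quantity(0.5, u.km)
    for text in ("0.5 km", ".5 km", "+.5km", "5e-1 km"):
        assert u.Quantity(text) == expected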
def test_unsupported():
q1 = np.arange(10) * u.m
with pytest.raises(TypeError):
np.bitwise_and(q1, q1)
def test_unit_identity():
q = 1.0 * u.hour
assert q.unit is u.hour
def test_quantity_to_view():
q1 = np.array([1000, 2000]) * u.m
q2 = q1.to(u.km)
assert q1.value[0] == 1000
assert q2.value[0] == 1
def test_quantity_tuple_power():
with pytest.raises(ValueError):
(5.0 * u.m) ** (1, 2)
def test_quantity_fraction_power():
q = (25.0 * u.m**2) ** Fraction(1, 2)
assert q.value == 5.0
assert q.unit == u.m
# Regression check to ensure we didn't create an object type by raising
# the value of the quantity to a Fraction. [#3922]
assert q.dtype.kind == "f"
def test_quantity_from_table():
"""
Checks that units from tables are respected when converted to a Quantity.
This also generically checks the use of *anything* with a `unit` attribute
passed into Quantity
"""
from astropy.table import Table
t = Table(data=[np.arange(5), np.arange(5)], names=["a", "b"])
t["a"].unit = u.kpc
qa = u.Quantity(t["a"])
assert qa.unit == u.kpc
assert_array_equal(qa.value, t["a"])
qb = u.Quantity(t["b"])
assert qb.unit == u.dimensionless_unscaled
assert_array_equal(qb.value, t["b"])
# This does *not* auto-convert, because it's not necessarily obvious that's
# desired. Instead we revert to standard `Quantity` behavior
qap = u.Quantity(t["a"], u.pc)
assert qap.unit == u.pc
assert_array_equal(qap.value, t["a"] * 1000)
qbp = u.Quantity(t["b"], u.pc)
assert qbp.unit == u.pc
assert_array_equal(qbp.value, t["b"])
# Also check with a function unit (regression test for gh-8430)
t["a"].unit = u.dex(u.cm / u.s**2)
fq = u.Dex(t["a"])
assert fq.unit == u.dex(u.cm / u.s**2)
assert_array_equal(fq.value, t["a"])
fq2 = u.Quantity(t["a"], subok=True)
assert isinstance(fq2, u.Dex)
assert fq2.unit == u.dex(u.cm / u.s**2)
assert_array_equal(fq2.value, t["a"])
with pytest.raises(u.UnitTypeError):
u.Quantity(t["a"])
def test_assign_slice_with_quantity_like():
# Regression tests for gh-5961
from astropy.table import Column, Table
# first check directly that we can use a Column to assign to a slice.
c = Column(np.arange(10.0), unit=u.mm)
q = u.Quantity(c)
q[:2] = c[:2]
# next check that we do not fail the original problem.
t = Table()
t["x"] = np.arange(10) * u.mm
t["y"] = np.ones(10) * u.mm
assert type(t["x"]) is Column
xy = np.vstack([t["x"], t["y"]]).T * u.mm
ii = [0, 2, 4]
assert xy[ii, 0].unit == t["x"][ii].unit
# should not raise anything
xy[ii, 0] = t["x"][ii]
def test_insert():
"""
Test Quantity.insert method. This does not test the full capabilities
of the underlying np.insert, but hits the key functionality for
Quantity.
"""
q = [1, 2] * u.m
# Insert a compatible float with different units
q2 = q.insert(0, 1 * u.km)
assert np.all(q2.value == [1000, 1, 2])
assert q2.unit is u.m
assert q2.dtype.kind == "f"
if minversion(np, "1.8.0"):
q2 = q.insert(1, [1, 2] * u.km)
assert np.all(q2.value == [1, 1000, 2000, 2])
assert q2.unit is u.m
# Cannot convert 1.5 * u.s to m
with pytest.raises(u.UnitsError):
q.insert(1, 1.5 * u.s)
# Tests with multi-dim quantity
q = [[1, 2], [3, 4]] * u.m
q2 = q.insert(1, [10, 20] * u.m, axis=0)
assert np.all(q2.value == [[1, 2], [10, 20], [3, 4]])
q2 = q.insert(1, [10, 20] * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2], [3, 20, 4]])
q2 = q.insert(1, 10 * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2], [3, 10, 4]])
def test_repr_array_of_quantity():
"""
Test print/repr of object arrays of Quantity objects with different
units.
Regression test for the issue first reported in
https://github.com/astropy/astropy/issues/3777
"""
a = np.array([1 * u.m, 2 * u.s], dtype=object)
assert repr(a) == "array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)"
assert str(a) == "[<Quantity 1. m> <Quantity 2. s>]"
class TestSpecificTypeQuantity:
def setup_method(self):
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
class Length2(Length):
_default_unit = u.m
class Length3(Length):
_unit = u.m
self.Length = Length
self.Length2 = Length2
self.Length3 = Length3
def test_creation(self):
l = self.Length(np.arange(10.0) * u.km)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.0) * u.hour)
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.0))
l2 = self.Length2(np.arange(5.0))
assert type(l2) is self.Length2
assert l2._default_unit is self.Length2._default_unit
with pytest.raises(u.UnitTypeError):
self.Length3(np.arange(10.0))
def test_view(self):
l = (np.arange(5.0) * u.km).view(self.Length)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
(np.arange(5.0) * u.s).view(self.Length)
v = np.arange(5.0).view(self.Length)
assert type(v) is self.Length
assert v._unit is None
l3 = np.ones((2, 2)).view(self.Length3)
assert type(l3) is self.Length3
assert l3.unit is self.Length3._unit
def test_operation_precedence_and_fallback(self):
l = self.Length(np.arange(5.0) * u.cm)
sum1 = l + 1.0 * u.m
assert type(sum1) is self.Length
sum2 = 1.0 * u.km + l
assert type(sum2) is self.Length
sum3 = l + l
assert type(sum3) is self.Length
res1 = l * (1.0 * u.m)
assert type(res1) is u.Quantity
res2 = l * l
assert type(res2) is u.Quantity
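    # Illustrative sketch (not part of the original tests): converting within
    # the equivalent physical type keeps the specific subclass.
    def _example_conversion_keeps_subclass(self):
        l = self.Length(2.0 * u.km)
        l_m = l.to(u.m)
        assert type(l_m) is self.Length
        assert l_m.value == 2000.0
        assert l_m.unit is u.m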
def test_unit_class_override():
class MyQuantity(u.Quantity):
pass
my_unit = u.Unit("my_deg", u.deg)
my_unit._quantity_class = MyQuantity
q1 = u.Quantity(1.0, my_unit)
assert type(q1) is u.Quantity
q2 = u.Quantity(1.0, my_unit, subok=True)
assert type(q2) is MyQuantity
class QuantityMimic:
def __init__(self, value, unit):
self.value = value
self.unit = unit
def __array__(self):
return np.array(self.value)
class QuantityMimic2(QuantityMimic):
def to(self, unit):
return u.Quantity(self.value, self.unit).to(unit)
def to_value(self, unit):
return u.Quantity(self.value, self.unit).to_value(unit)
class TestQuantityMimics:
"""Test Quantity Mimics that are not ndarray subclasses."""
@pytest.mark.parametrize("Mimic", (QuantityMimic, QuantityMimic2))
def test_mimic_input(self, Mimic):
value = np.arange(10.0)
mimic = Mimic(value, u.m)
q = u.Quantity(mimic)
assert q.unit == u.m
assert np.all(q.value == value)
q2 = u.Quantity(mimic, u.cm)
assert q2.unit == u.cm
assert np.all(q2.value == 100 * value)
@pytest.mark.parametrize("Mimic", (QuantityMimic, QuantityMimic2))
def test_mimic_setting(self, Mimic):
mimic = Mimic([1.0, 2.0], u.m)
q = u.Quantity(np.arange(10.0), u.cm)
q[8:] = mimic
assert np.all(q[:8].value == np.arange(8.0))
assert np.all(q[8:].value == [100.0, 200.0])
def test_mimic_function_unit(self):
mimic = QuantityMimic([1.0, 2.0], u.dex(u.cm / u.s**2))
d = u.Dex(mimic)
assert isinstance(d, u.Dex)
assert d.unit == u.dex(u.cm / u.s**2)
assert np.all(d.value == [1.0, 2.0])
q = u.Quantity(mimic, subok=True)
assert isinstance(q, u.Dex)
assert q.unit == u.dex(u.cm / u.s**2)
assert np.all(q.value == [1.0, 2.0])
with pytest.raises(u.UnitTypeError):
u.Quantity(mimic)
def test_masked_quantity_str_repr():
"""Ensure we don't break masked Quantity representation."""
# Really, masked quantities do not work well, but at least let the
# basics work.
masked_quantity = np.ma.array([1, 2, 3, 4] * u.kg, mask=[True, False, True, False])
str(masked_quantity)
repr(masked_quantity)
class TestQuantitySubclassAboveAndBelow:
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __array_finalize__(self, obj):
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
if hasattr(obj, "my_attr"):
self.my_attr = obj.my_attr
self.MyArray = MyArray
self.MyQuantity1 = type("MyQuantity1", (u.Quantity, MyArray), dict(my_attr="1"))
self.MyQuantity2 = type("MyQuantity2", (MyArray, u.Quantity), dict(my_attr="2"))
def test_setup(self):
mq1 = self.MyQuantity1(10, u.m)
assert isinstance(mq1, self.MyQuantity1)
assert mq1.my_attr == "1"
assert mq1.unit is u.m
mq2 = self.MyQuantity2(10, u.m)
assert isinstance(mq2, self.MyQuantity2)
assert mq2.my_attr == "2"
assert mq2.unit is u.m
def test_attr_propagation(self):
mq1 = self.MyQuantity1(10, u.m)
mq12 = self.MyQuantity2(mq1)
assert isinstance(mq12, self.MyQuantity2)
assert not isinstance(mq12, self.MyQuantity1)
assert mq12.my_attr == "1"
assert mq12.unit is u.m
mq2 = self.MyQuantity2(10, u.m)
mq21 = self.MyQuantity1(mq2)
assert isinstance(mq21, self.MyQuantity1)
assert not isinstance(mq21, self.MyQuantity2)
assert mq21.my_attr == "2"
assert mq21.unit is u.m
|
3015aef4c4ce4cbf3806f5f62215dd50e544396f49dffa4baba2bd18e8b44cb8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED,
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS,
TBD_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
wrapped_functions = {}
for mod in modules:
for name, f in mod.__dict__.items():
if f is np.printoptions or name.startswith("_"):
continue
if callable(f) and hasattr(f, "__wrapped__"):
wrapped_functions[name] = f
return wrapped_functions
all_wrapped_functions = get_wrapped_functions(
np, np.fft, np.linalg, np.lib.recfunctions
)
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup_method(self):
self.q = np.arange(9.0).reshape(3, 3) / 4.0 * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1.0 * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1.0 * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1.0 * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150.0, 350.0]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup_method(self):
self.q = (np.arange(9.0).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.0)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True], self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.0) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.0) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.0)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.0) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.0) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.0).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25.0 * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup_method(self):
self.q1 = np.arange(6.0).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
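        # ``q_list`` holds the Quantities to combine; ``q_ref`` determines the
        # unit the result is expected in (defaults to the first entry).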
q_list = kwargs.pop("q_list", [self.q1, self.q2])
q_ref = kwargs.pop("q_ref", q_list[0])
o = func(q_list, *args, **kwargs)
v_list = [q_ref._to_own_unit(q) for q in q_list]
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
# regression test for gh-13322.
self.check(np.concatenate, dtype="f4")
self.check(
np.concatenate,
q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1,
)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = (
np.concatenate([self.q1.value, self.q2.to_value(self.q1.unit)])
* self.q1.unit
)
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0.0, 1.0 * u.m], [1.0 * u.cm, 2.0 * u.km]])
assert np.all(result == np.block([[0, 1.0], [0.01, 2000.0]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = (
np.append(self.q1.value, self.q2.to_value(self.q1.unit), axis=0)
* self.q1.unit
)
assert np.all(out == expected)
a = np.arange(3.0)
result = np.append(a, 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.0).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50.0, 25.0] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.0)
result = np.insert(a, (2,), 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50.0 * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0.0 * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1.0, 6.0) * u.m
out = np.pad(q, (2, 3), "constant", constant_values=(0.0, 150.0 * u.cm))
assert out.unit == q.unit
expected = (
np.pad(q.value, (2, 3), "constant", constant_values=(0.0, 1.5)) * q.unit
)
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), "constant", constant_values=150.0 * u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), "constant", constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), "linear_ramp", end_values=(25.0 * u.cm, 0.0))
assert out3.unit == q.unit
expected3 = (
np.pad(q.value, (2, 3), "linear_ramp", end_values=(0.25, 0.0)) * q.unit
)
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.arange(54.0).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
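    # The unit of a product depends on how many elements are multiplied, so
    # prod/cumprod (and their aliases) raise for dimensional quantities.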
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0.0, 10.0, 20.0]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
unit = self.q.unit
expected = (
np.clip(self.q.value, qmin.to_value(unit), qmax.to_value(unit)) * unit
)
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.0 * u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1.0 * u.km)
expected = np.where([True, False, True], self.q.value, 1000.0) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
        # result is 2x3x5; out[0, :, :] comes from q1, out[1, :, :] from q2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select(
[q < 0.55 * u.m, q > 1.0 * u.m], [q, q.to(u.cm)], default=-1.0 * u.km
)
expected = (
np.select([q.value < 0.55, q.value > 1], [q.value, q.value], default=-1000)
* u.m
)
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1 * u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1.0, 2.0] * u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.0 * u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q, nan=1.0 * u.km, posinf=2.0 * u.km, neginf=-2 * u.km)
expected = [-2000.0, 2000.0, 1000.0, 3.0, 4.0] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1.0 + 1j] * u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1.0 + 1j] * u.m)
def test_isclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 102.0, 199.0]) * u.cm
atol = 1.5 * u.cm
rtol = 1.0 * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(
q1.value, q2.to_value(q1.unit), atol=atol.to_value(q1.unit)
)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
@needs_array_function
def test_allclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 198.0]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit), atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit**2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
        # Counts carry no unit, so a plain integer ndarray is returned.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
atol = 2 * u.cm
rtol = 1.0 * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0.0, rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2 * u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.0 * u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.0) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@pytest.mark.parametrize("equal_nan", [False, True])
def test_array_equal_nan(self, equal_nan):
q1 = np.linspace(0, 1, num=11) * u.m
q1[0] = np.nan
q2 = q1.to(u.cm)
result = np.array_equal(q1, q2, equal_nan=equal_nan)
assert result == equal_nan
def test_array_equal_incompatible_units(self):
assert not np.array_equal([1, 2] * u.m, [1, 2] * u.s)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0.0, 1.0, 2.0]] * 3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
def test_array_equiv_incompatible_units(self):
assert not np.array_equiv([1, 1] * u.m, [1] * u.s)
class TestNanFunctions(InvariantUnitTestSetup):
def setup_method(self):
super().setup_method()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit**2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.dot(q1, q2)
assert o == 32.0 * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32.0 + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.0).reshape(3, 4, 5) * u.m
b = np.arange(24.0).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value, axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum("...i", q1)
assert np.all(o == q1)
o = np.einsum("ii", q1)
expected = np.einsum("ii", q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum("ij,jk", q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum("ij,jk", q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum_path("...i", q1)
assert o[0] == ["einsum_path", (0,)]
o = np.einsum_path("ii", q1)
assert o[0] == ["einsum_path", (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path("ij,jk", q1, q2)
assert o[0] == ["einsum_path", (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.0) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10.0 * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.0) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.0) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.0) * x.unit
assert np.all(out == expected)
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.0, n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
spacing = 10.0 * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit / spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2.0 * u.s
y = [1.0, 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
        # Note: linspace gets the unit of the end point, which is not super logical.
out = np.linspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.0 * u.dex(unit), 20 * u.dex(unit), 10)
expected = np.logspace(10.0, 20.0, 10) * unit
assert np.all(out == expected)
out = np.logspace(10.0 * u.STmag, 20 * u.STmag, 10)
expected = np.logspace(10.0, 20.0, 10, base=10.0 ** (-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1.0, 7.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250.0, 2750.0]) * u.m
xp = np.arange(5.0) * u.km
yp = np.arange(5.0) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1 * u.s, 1 * u.day])
expected = (
np.piecewise(x.value, [x.value < 0, x.value >= 0], [-1, 24 * 3600]) * u.s
)
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [-1 * u.s, 1 * u.day, lambda x: 1 * u.hour]
)
expected2 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [-1, 24 * 3600, 3600])
* u.s
)
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [0, 1 * u.percent, lambda x: 1 * u.one]
)
expected3 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [0, 0.01, 1]) * u.one
)
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.0])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.0])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500.0, 2500.0, 4500.0]) * u.m
bins = np.arange(10.0) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(
self,
function,
*args,
value_args=None,
value_kwargs=None,
expected_units=None,
**kwargs
):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
        # Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(
out[bin_slice], expected[bin_slice], expected_units[bin_slice]
):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(
np.histogram, x, value_args=(x.value,), expected_units=(None, x.unit)
)
# With bins.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
value_args=(x.value, [1.25, 2.0]),
expected_units=(None, x.unit),
)
# With density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
density=True,
value_args=(x.value, [1.25, 2.0]),
expected_units=(1 / x.unit, x.unit),
)
# With weights.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit),
)
# With weights and density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
density=True,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit / x.unit, x.unit),
)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@classmethod
def _range_value(cls, range, unit):
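        # Recursively strip units from a (possibly nested) range specification,
        # converting any Quantity to a plain value in the given unit.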
if isinstance(range, u.Quantity):
return range.to_value(unit)
else:
return [cls._range_value(r, unit) for r in range]
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_range(self, range):
self.check(
np.histogram,
self.x,
range=range,
value_args=[self.x.value],
value_kwargs=dict(range=self._range_value(range, self.x.unit)),
expected_units=(None, self.x.unit),
)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.0]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_bin_edges_range(self, range):
out_b = np.histogram_bin_edges(self.x, range=range)
expected_b = np.histogram_bin_edges(
self.x.value, range=self._range_value(range, self.x.unit)
)
assert np.all(out_b.value == expected_b)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(
np.histogram2d,
x,
y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit),
)
# Check units with density.
self.check(
np.histogram2d,
x,
y,
density=True,
value_args=(x.value, y.value),
expected_units=(1 / (x.unit * y.unit), x.unit, y.unit),
)
# Check units with weights.
self.check(
np.histogram2d,
x,
y,
weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogram2d,
x,
y,
[5, inb_y],
value_args=(x.value, y.value, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, x.unit, y.unit),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogram2d,
x.value,
y.value,
bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, u.one, u.one),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogram2d_range(self, range):
self.check(
np.histogram2d,
self.x,
self.y,
range=range,
value_args=[self.x.value, self.y.value],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, self.x.unit, self.y.unit),
)
@needs_array_function
def test_histogramdd(self):
        # First replicate the histogram2d tests, but using the
        # histogramdd override, which normally takes the sample as a tuple
# with a given number of dimensions, and returns the histogram
# as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(
np.histogramdd,
sample,
value_args=(sample_values,),
expected_units=(None, sample_units),
)
# Check units with density.
self.check(
np.histogramdd,
sample,
density=True,
value_args=(sample_values,),
expected_units=(1 / (self.x.unit * self.y.unit), sample_units),
)
# Check units with weights.
self.check(
np.histogramdd,
sample,
weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogramdd,
sample,
[5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, sample_units),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogramdd,
sample_values,
bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, (u.one, u.one)),
)
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(
np.histogramdd,
xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(
np.histogramdd,
(xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogramdd_range(self, range):
self.check(
np.histogramdd,
(self.x, self.y),
range=range,
value_args=[(self.x.value, self.y.value)],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, (self.x.unit, self.y.unit)),
)
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
        # Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
def test_sort_axis(self):
self.check(np.sort, axis=0)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
self.check(np.msort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
# For these, making behaviour work means deviating only slightly from
# the docstring, and by default they fail miserably. So, might as well.
def setup_method(self):
self.q = np.arange(3.0) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=", ")
expected1 = "[0., 1., 2.]"
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.q, None, None, None, ", ", "", np._NoValue, {"float": str}
)
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype("f4")
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
    # float quantities are safe, but the integer ones are not.
def setup_method(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), "m", dtype="u1")
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.0) * u.m
q2 = np.arange(5.0) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm
def check(self, function, qs, *args, **kwargs):
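        # ``unit`` is the unit expected on the (first) output; pass ``unit=None``
        # for functions such as ``np.in1d`` that return plain booleans.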
unit = kwargs.pop("unit", self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True),
),
)
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize(
"kwargs",
(
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1),
),
)
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize("kwargs", (dict(), dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
        # ediff1d always works, as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.0) * u.m
out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize("function", [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1 * u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup_method(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.0).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup_method(self):
self.q = (
np.array(
[[ 1.0, -1.0, 2.0],
[ 0.0, 3.0, -1.0],
[-1.0, -1.0, 1.0]]
) << u.m
) # fmt: skip
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
# Use a matrix that is not so good, so tol=1 and tol=0.01 differ.
q = np.arange(9.0).reshape(3, 3) / 4 * u.m
tol = 1.0 * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.0) * np.arange(5.0)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = (
np.linalg.pinv(self.q.value, rcond.to_value(self.q.unit)) / self.q.unit
)
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, "unit")
@needs_array_function
def test_solve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value, rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit**2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.0 * u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1.0, 1.0, 1.0]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.0 * u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit**0.5
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype(
[("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
)
self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
self.pv_t = np.array(
[((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], self.pv_t_dtype
)
self.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
self.pv_t_unit = u.StructuredUnit((self.pv_unit, u.s), ("pv", "t"))
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
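        # ``q_pv`` is a structured Quantity with fields (p, v) in (km, km/s);
        # ``q_pv_t`` nests that structure under 'pv' and adds a time 't' in s.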
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
        assert u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# It works.
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# Can't structure something that's already structured.
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
        # For the other tests of ``unstructured_to_structured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
def test_merge_arrays_repeat_dtypes(self):
# Cannot merge things with repeat dtypes.
q1 = u.Quantity([(1,)], dtype=[("f1", float)])
q2 = u.Quantity([(1,)], dtype=[("f1", float)])
with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
rfn.merge_arrays((q1, q2))
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays(self, flatten):
"""Test `numpy.lib.recfunctions.merge_arrays`."""
# Merge single normal array.
arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
assert_array_equal(arr["f0"], self.q_pv["p"])
assert arr.unit == (u.km,)
# Merge single structured array.
arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
assert_array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
# Merge 1-element tuple.
arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
assert np.array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
@pytest.mark.xfail
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_nonquantities(self, flatten):
# Fails because cannot create quantity from structured array.
        arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=flatten)
def test_merge_array_nested_structure(self):
# Merge 2-element tuples without flattening.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
assert_array_equal(arr["f0"], self.q_pv)
assert_array_equal(arr["f1"], self.q_pv_t)
assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))
def test_merge_arrays_flatten_nested_structure(self):
# Merge 2-element tuple, flattening it.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
assert_array_equal(arr["p"], self.q_pv["p"])
assert_array_equal(arr["v"], self.q_pv["v"])
assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
assert_array_equal(arr["t"], self.q_pv_t["t"])
assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)
def test_merge_arrays_asrecarray(self):
with pytest.raises(ValueError, match="asrecarray=True is not supported."):
rfn.merge_arrays(self.q_pv, asrecarray=True)
def test_merge_arrays_usemask(self):
with pytest.raises(ValueError, match="usemask=True is not supported."):
rfn.merge_arrays(self.q_pv, usemask=True)
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_str(self, flatten):
with pytest.raises(
TypeError, match="the Quantity implementation cannot handle"
):
rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
} # fmt: skip
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (
SUBCLASS_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(FUNCTION_HELPERS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
    # untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
|
b4df3686b26a66274e8bacd997d95aaebf703963703bd5c6fdc16568a7e8c51e | # The purpose of these tests are to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import concurrent.futures
import dataclasses
import warnings
from collections import namedtuple
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy.units.quantity_helper.converters import UfuncHelpers
from astropy.units.quantity_helper.helpers import helper_sqrt
from astropy.utils.compat.optional_deps import HAS_SCIPY
testcase = namedtuple("testcase", ["f", "q_in", "q_out"])
testexc = namedtuple("testexc", ["f", "q_in", "exc", "msg"])
testwarn = namedtuple("testwarn", ["f", "q_in", "wfilter"])
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
    # Careful with the following line: it would break on a function returning
    # a single tuple (as opposed to a tuple of return values).
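    # For instance (sketch): np.modf returns a 2-tuple of results, which is kept
    # as is, while np.sin returns a single Quantity, which gets wrapped; a
    # hypothetical function whose single return value is itself a tuple would be
    # mis-split by this wrapping.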
results = (results,) if not isinstance(results, tuple) else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.0e-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
class TestUfuncHelpers:
# Note that this test should work even if scipy is present, since
# the scipy.special ufuncs are only loaded on demand.
# The test passes independently of whether erfa is already loaded
# (which will be the case for a full test, since coordinates uses it).
    def test_coverage(self):
        """Test that we cover all ufuncs."""
all_np_ufuncs = {
ufunc
for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
all_q_ufuncs = qh.UNSUPPORTED_UFUNCS | set(qh.UFUNC_HELPERS.keys())
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy or erfa.
# (Since coverage for erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = {
ufunc
for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
assert all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set()
def test_scipy_registered(self):
# Should be registered as existing even if scipy is not available.
assert "scipy.special" in qh.UFUNC_HELPERS.modules
def test_removal_addition(self):
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = None
assert np.add not in qh.UFUNC_HELPERS
assert np.add in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
@pytest.mark.slow
def test_thread_safety(self, fast_thread_switching):
def dummy_ufunc(*args, **kwargs):
return np.sqrt(*args, **kwargs)
def register():
return {dummy_ufunc: helper_sqrt}
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
helpers = UfuncHelpers()
helpers.register_module(
"astropy.units.tests.test_quantity_ufuncs",
["dummy_ufunc"],
register,
)
futures = [
executor.submit(lambda: helpers[dummy_ufunc])
for i in range(workers)
]
values = [future.result() for future in futures]
assert values == [helper_sqrt] * workers
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize(
"tc",
(
testcase(
f=np.sin,
q_in=(30.0 * u.degree,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.sin,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([0.0, 1.0 / np.sqrt(2.0), 1.0]) * u.one,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(30.0 * u.degree),),
q_out=(np.radians(30.0) * u.radian,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.cos,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.cos,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([1.0, 1.0 / np.sqrt(2.0), 0.0]) * u.one,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.tan,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(np.sqrt(3.0) * u.dimensionless_unscaled,),
),
testcase(
f=np.tan,
q_in=(np.array([0.0, 45.0, 135.0, 180.0]) * u.degree,),
q_out=(np.array([0.0, 1.0, -1.0, 0.0]) * u.dimensionless_unscaled,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
q_out=(np.radians(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
),
testcase(
f=np.arctan2,
q_in=(np.array([10.0, 30.0, 70.0, 80.0]) * u.m, 2.0 * u.km),
q_out=(
np.arctan2(np.array([10.0, 30.0, 70.0, 80.0]), 2000.0) * u.radian,
),
),
testcase(
f=np.arctan2,
q_in=((np.array([10.0, 80.0]) * u.m / (2.0 * u.km)).to(u.one), 1.0),
q_out=(np.arctan2(np.array([10.0, 80.0]) / 2000.0, 1.0) * u.radian,),
),
testcase(f=np.deg2rad, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.radians, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.deg2rad, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.radians, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.rad2deg, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.degrees, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.rad2deg, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
testcase(f=np.degrees, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
),
)
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize(
"te",
(
testexc(f=np.deg2rad, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(f=np.radians, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.rad2deg, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.degrees, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(
f=np.sin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units",
),
testexc(
f=np.arcsin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities",
),
testexc(
f=np.cos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units",
),
testexc(
f=np.arccos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities",
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units",
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0 * u.s),
exc=u.UnitsError,
msg="compatible dimensions",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0),
exc=u.UnitsError,
msg="dimensionless quantities when other arg",
),
),
)
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize(
"tw",
(testwarn(f=np.arcsin, q_in=(27.0 * u.pc / (15 * u.kpc),), wfilter="error"),),
)
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4.0 * u.m, 2.0 / u.s) == 8.0 * u.m / u.s
assert np.multiply(4.0 * u.m, 2.0) == 8.0 * u.m
assert np.multiply(4.0, 2.0 / u.s) == 8.0 / u.s
def test_multiply_array(self):
assert np.all(
np.multiply(np.arange(3.0) * u.m, 2.0 / u.s)
== np.arange(0, 6.0, 2.0) * u.m / u.s
)
@pytest.mark.skipif(
not isinstance(getattr(np, "matmul", None), np.ufunc),
reason="np.matmul is not yet a gufunc",
)
def test_matmul(self):
q = np.arange(3.0) * u.m
r = np.matmul(q, q)
assert r == 5.0 * u.m**2
# less trivial case.
q1 = np.eye(3) * u.m
q2 = np.array(
[[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]
) / u.s # fmt: skip
r2 = np.matmul(q1, q2)
assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4.0 * u.m, 2.0 * u.s) == function(4.0, 2.0) * u.m / u.s
assert function(4.0 * u.m, 2.0) == function(4.0, 2.0) * u.m
assert function(4.0, 2.0 * u.s) == function(4.0, 2.0) / u.s
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(
function(np.arange(3.0) * u.m, 2.0 * u.s)
== function(np.arange(3.0), 2.0) * u.m / u.s
)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1.0, 2.0, 3.0]) * u.m
divisor = np.array([3.0, 4.0, 5.0]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
assert_allclose(quotient.value, [13.0, 19.0, 23.0])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4.0 * u.m) == 2.0 * u.m**0.5
def test_sqrt_array(self):
assert np.all(
np.sqrt(np.array([1.0, 4.0, 9.0]) * u.m)
== np.array([1.0, 2.0, 3.0]) * u.m**0.5
)
def test_square_scalar(self):
assert np.square(4.0 * u.m) == 16.0 * u.m**2
def test_square_array(self):
assert np.all(
np.square(np.array([1.0, 2.0, 3.0]) * u.m)
== np.array([1.0, 4.0, 9.0]) * u.m**2
)
def test_reciprocal_scalar(self):
assert np.reciprocal(4.0 * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(
np.reciprocal(np.array([1.0, 2.0, 4.0]) * u.m)
== np.array([1.0, 0.5, 0.25]) / u.m
)
def test_heaviside_scalar(self):
assert np.heaviside(0.0 * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert (
np.heaviside(0.0 * u.s, 25 * u.percent) == 0.25 * u.dimensionless_unscaled
)
assert np.heaviside(2.0 * u.J, 0.25) == 1.0 * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1.0, 0.0, 0.0, +1.0])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(
np.heaviside(values * u.m, halfway * u.dimensionless_unscaled)
== [0, 0.25, 0.75, +1.0] * u.dimensionless_unscaled
)
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_scalar(self, function):
assert function(8.0 * u.m**3) == 2.0 * u.m
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4. See gh-4388.
values = np.array([1.0, 8.0, 64.0])
assert np.all(function(values * u.m**3) == function(values) * u.m)
def test_power_scalar(self):
assert np.power(4.0 * u.m, 2.0) == 16.0 * u.m**2
assert np.power(4.0, 200.0 * u.cm / u.m) == u.Quantity(
16.0, u.dimensionless_unscaled
)
# regression check on #1696
assert np.power(4.0 * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(
np.power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_float_power_array(self):
assert np.all(
np.float_power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.float_power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_power_array_array(self):
with pytest.raises(ValueError):
np.power(4.0 * u.m, [2.0, 4.0])
def test_power_array_array2(self):
with pytest.raises(ValueError):
np.power([2.0, 4.0] * u.m, [2.0, 4.0])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2.0, 4.0] * u.m / u.m
powers = [2.0, 4.0]
res = np.power(q, powers)
assert np.all(res.value == q.value**powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2.0, 4.0] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2**2
assert np.all(res3.value == q2.value**2)
assert res3.unit == q2.unit**2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError, match="raise something to a dimensionless"):
np.power(3.0, 4.0 * u.m)
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.0) == 3.0 * u.m
assert np.copysign(3 * u.m, 1.0 * u.s) == 3.0 * u.m
assert np.copysign(3 * u.m, -1.0) == -3.0 * u.m
assert np.copysign(3 * u.m, -1.0 * u.s) == -3.0 * u.m
def test_copysign_array(self):
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0 * u.m)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(
np.array([1.0, 2.0, 3.0]) * u.s, np.array([-2.0, 2.0, -4.0]) * u.m
)
== np.array([-1.0, 2.0, -3.0]) * u.s
)
q = np.copysign(np.array([1.0, 2.0, 3.0]), -3 * u.m)
assert np.all(q == np.array([-1.0, -2.0, -3.0]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4.0 * u.m, 2) == 16.0 * u.m
def test_ldexp_array(self):
assert np.all(
np.ldexp(np.array([1.0, 2.0, 3.0]) * u.m, [3, 2, 1])
== np.array([8.0, 8.0, 6.0]) * u.m
)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3.0 * u.m, 4.0)
with pytest.raises(TypeError):
np.ldexp(3.0, u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_scalar(self, function):
q = function(3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(np.array([1.0 / 3.0, 1.0 / 2.0, 1.0])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function "
"to dimensionless quantities"
),
):
function(3.0 * u.m / u.s)
def test_modf_scalar(self):
q = np.modf(9.0 * u.m / (600.0 * u.cm))
assert q == (0.5 * u.dimensionless_unscaled, 1.0 * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.0) * u.m / (500.0 * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3.0 * u.m / (6.0 * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert all(
(_q0, _q1) == np.frexp(_d)
for _q0, _q1, _d in zip(q[0], q[1], [1.0 / 3.0, 1.0 / 2.0, 1.0])
)
def test_frexp_invalid_units(self):
        # Can't use frexp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(3.0 * u.m / u.s)
# also does not work on quantities that can be made dimensionless
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm), 1.0)
assert q.unit == u.dimensionless_unscaled
assert_allclose(
q.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0]), 1.0)
)
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.km / u.s, 3.0 * u.m / u.s)
class TestInvariantUfuncs:
@pytest.mark.parametrize(
"ufunc",
[
np.absolute,
np.fabs,
np.conj,
np.conjugate,
np.negative,
np.spacing,
np.rint,
np.floor,
np.ceil,
np.positive,
],
)
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(
"ufunc", [np.absolute, np.conjugate, np.negative, np.rint, np.floor, np.ceil]
)
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
("ufunc", "arbitrary"),
[
(np.add, 0.0),
(np.subtract, 0.0),
(np.hypot, 0.0),
(np.maximum, 0.0),
(np.minimum, 0.0),
(np.nextafter, 0.0),
(np.remainder, np.inf),
(np.mod, np.inf),
(np.fmod, np.inf),
],
)
def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i1, arbitrary)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
class TestComparisonUfuncs:
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.0)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(
q_o2 == ufunc((q_i1 / q_i2).to_value(u.dimensionless_unscaled), 2.0)
)
# comparison with 0., inf, nan is OK even for dimensional quantities
# (though ignore numpy runtime warnings for comparisons with nan).
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
for arbitrary_unit_value in (0.0, np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value * np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0.0, np.inf, np.nan]))
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.isinf, np.isnan, np.signbit))
def test_onearg_test_ufuncs(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = ufunc(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = np.sign(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
class TestInplaceUfuncs:
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value / 10.0, out=s)
assert check is s
assert np.all(check.value == np.arcsin(value / 10.0))
assert check.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100.0 * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
        # can also replace input with the first output if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# can also replace input with first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.0
assert check is s
assert np.all(check.value == value / 2.0)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2.0 * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1.0 * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2.0 / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1.0, 2.0, 3.0]) * u.dimensionless_unscaled
np.add(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert np.all(s.value == np.array([3.0, 6.0, 9.0]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert_allclose(s.value, np.arctan2(1.0, 2.0))
assert s.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.0 * u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.0) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1.0 * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
First two tests that check that float32 is kept close #3976.
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += 20.0 * u.km
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.parametrize("ufunc", (np.equal, np.greater))
def test_comparison_ufuncs_inplace(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
check = np.empty(q_i1.shape, bool)
ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
result = np.empty(q_i1.shape, bool)
q_o = ufunc(q_i1, q_i2, out=result)
assert q_o is result
assert type(q_o) is np.ndarray
assert q_o.dtype == bool
assert np.all(q_o == check)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.signbit))
def test_onearg_test_ufuncs_inplace(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, bool)
ufunc(q.value, out=check)
result = np.empty(q.shape, bool)
out = ufunc(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign_inplace(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, q.dtype)
np.sign(q.value, out=check)
result = np.empty(q.shape, q.dtype)
out = np.sign(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
def test_ndarray_inplace_op_with_quantity(self):
"""Regression test for gh-13911."""
a = np.arange(3.0)
q = u.Quantity([12.5, 25.0], u.percent)
a[:2] += q # This used to fail
assert_array_equal(a, np.array([0.125, 1.25, 2.0]))
@pytest.mark.skipif(
not hasattr(np.core.umath, "clip"), reason="no clip ufunc available"
)
class TestClip:
"""Test the clip ufunc.
    In numpy, this is hidden behind a function that does not do
    backwards-compatibility checks. We explicitly test the ufunc here.
"""
def setup_method(self):
self.clip = np.core.umath.clip
def test_clip_simple(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
result = self.clip(q, q_min, q_max)
assert result.unit == q.unit
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
assert np.all(result == expected)
def test_clip_unitless_parts(self):
q = np.arange(-1.0, 10.0) * u.m
qlim = 0.0055 * u.km
# one-sided
result1 = self.clip(q, -np.inf, qlim)
expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
assert np.all(result1 == expected1)
result2 = self.clip(q, qlim, np.inf)
expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
assert np.all(result2 == expected2)
# Zero
result3 = self.clip(q, np.zeros(q.shape), qlim)
expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
assert np.all(result3 == expected3)
# Two unitless parts, array-shaped.
result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
expected4 = self.clip(q.value, 0, np.inf) * q.unit
assert np.all(result4 == expected4)
def test_clip_dimensionless(self):
q = np.arange(-1.0, 10.0) * u.dimensionless_unscaled
result = self.clip(q, 200 * u.percent, 5.0)
expected = self.clip(q, 2.0, 5.0)
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_ndarray(self):
a = np.arange(-1.0, 10.0)
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled)
assert isinstance(result, u.Quantity)
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_quantity_inplace(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
result = self.clip(q, q_min, q_max, out=q)
assert result is q
assert np.all(result == expected)
def test_clip_ndarray_dimensionless_output(self):
a = np.arange(-1.0, 10.0)
q = np.zeros_like(a) * u.m
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled, out=q)
assert result is q
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_errors(self):
q = np.arange(-1.0, 10.0) * u.m
with pytest.raises(u.UnitsError):
self.clip(q, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q.value, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q, -1, 0.0)
with pytest.raises(u.UnitsError):
self.clip(q, 0.0, 1.0)
class TestUfuncAt:
    """Test the 'at' method of ufuncs (in-place calculation at given indices).
    For Quantities, since the calculation is done in place, this makes sense
    only if the result is still a quantity, and if the unit does not have to
    change.
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.0) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.0) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.0) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.0) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.0 * u.km)
np.add.at(check, i, 1000.0)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.0 * u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1 * u.s)
# but be fine if it does not
s = np.arange(10.0) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.0 * u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.0) * u.m
np.multiply.at(s, i, 2.0)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.0 * u.km)
class TestUfuncReduceReduceatAccumulate:
    """Test the 'reduce', 'reduceat' and 'accumulate' methods for ufuncs.
    For Quantities, these make sense only if the unit does not have to change.
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
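    # Sketch of the two-argument cases tested below: np.add.reduce on an array in
    # metres keeps the unit (a sum of lengths is a length), whereas
    # np.multiply.reduce must raise UnitsError unless the array is dimensionless,
    # since the unit of the result would otherwise have to change.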
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.0) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.0) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
    """Test the 'outer' method of ufuncs.
    Just a few spot checks, since it uses the same code as the regular
    ufunc call.
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.0) * u.m
s2 = np.arange(2.0) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.0) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
@dataclasses.dataclass
class DuckQuantity1:
data: u.Quantity
@dataclasses.dataclass
class DuckQuantity2(DuckQuantity1):
@property
def unit(self) -> u.UnitBase:
return self.data.unit
@dataclasses.dataclass(eq=False)
class DuckQuantity3(DuckQuantity2):
def __array_ufunc__(self, function, method, *inputs, **kwargs):
inputs = [inp.data if isinstance(inp, type(self)) else inp for inp in inputs]
out = kwargs.get("out", None)
kwargs_copy = {}
for k in kwargs:
kwarg = kwargs[k]
if isinstance(kwarg, type(self)):
kwargs_copy[k] = kwarg.data
elif isinstance(kwarg, (list, tuple)):
kwargs_copy[k] = type(kwarg)(
item.data if isinstance(item, type(self)) else item
for item in kwarg
)
else:
kwargs_copy[k] = kwarg
kwargs = kwargs_copy
for inp in inputs:
if isinstance(inp, np.ndarray):
result = inp.__array_ufunc__(function, method, *inputs, **kwargs)
if result is not NotImplemented:
if out is None:
return type(self)(result)
else:
if function.nout == 1:
return out[0]
else:
return out
return NotImplemented
class TestUfuncReturnsNotImplemented:
@pytest.mark.parametrize("ufunc", (np.negative, np.abs))
class TestUnaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, duck_quantity):
with pytest.raises(TypeError, match="bad operand type for .*"):
ufunc(duck_quantity)
@pytest.mark.parametrize(
"duck_quantity", [DuckQuantity3(1 * u.mm), DuckQuantity3([1, 2] * u.mm)]
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(np.empty_like(ufunc(duck_quantity.data)))
out_expected = np.empty_like(ufunc(duck_quantity.data))
result = ufunc(duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
@pytest.mark.parametrize("ufunc", (np.add, np.multiply, np.less))
@pytest.mark.parametrize("quantity", (1 * u.m, [1, 2] * u.m))
class TestBinaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, quantity, duck_quantity):
with pytest.raises(
(TypeError, ValueError),
match=(
r"(Unsupported operand type\(s\) for ufunc .*)|"
r"(unsupported operand type\(s\) for .*)|"
r"(Value not scalar compatible or convertible to an int, float, or complex array)"
),
):
ufunc(quantity, duck_quantity)
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity3(1 * u.mm), DuckQuantity3([1, 2] * u.mm)],
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, quantity, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(
np.empty_like(ufunc(quantity, duck_quantity.data))
)
out_expected = np.empty_like(ufunc(quantity, duck_quantity.data))
result = ufunc(quantity, duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(quantity, duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
if HAS_SCIPY:
from scipy import special as sps
erf_like_ufuncs = (
sps.erf, sps.erfc, sps.erfcx, sps.erfi,
sps.gamma, sps.gammaln, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.digamma, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10,
) # fmt: skip
if isinstance(sps.erfinv, np.ufunc):
erf_like_ufuncs += (sps.erfinv, sps.erfcinv)
def test_scipy_registration():
"""Check that scipy gets loaded upon first use."""
assert sps.erf not in qh.UFUNC_HELPERS
sps.erf(1.0 * u.percent)
assert sps.erf in qh.UFUNC_HELPERS
if isinstance(sps.erfinv, np.ufunc):
assert sps.erfinv in qh.UFUNC_HELPERS
else:
assert sps.erfinv not in qh.UFUNC_HELPERS
class TestScipySpecialUfuncs:
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize("function", (sps.radian,))
def test_radian(self, function):
q1 = function(180.0 * u.degree, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0.0 * u.degree, 30.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q2.value, (30.0 * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0.0 * u.degree, 0.0 * u.arcmin, 30.0 * u.arcsec)
assert_allclose(q3.value, (30.0 * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3.0 * u.radian, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q4.value, 3.0)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3.0 * u.m, 2.0 * u.s, 1.0 * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e,
) # fmt: skip
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2.0 * u.m / (2.0 * u.m), 3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_array(self, function):
q = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m),
)
assert q.unit == u.dimensionless_unscaled
assert np.all(
q.value == function(np.ones(3), np.array([1.0 / 3.0, 1.0 / 2.0, 1.0]))
)
# should also work on quantities that can be made dimensionless
q2 = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm),
)
assert q2.unit == u.dimensionless_unscaled
assert_allclose(
q2.value,
function(np.ones(3), np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])),
)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.kg, 3.0 * u.m / u.s)
|
d42b78e0b432d1373d85226ccae8a488c8a6224befb63f4ef4c2ba5205a8fd73 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numbers
import numpy as np
from astropy.units import (
CompositeUnit,
Unit,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
photometric,
)
from .core import FunctionQuantity, FunctionUnitBase
from .units import dB, dex, mag
__all__ = [
"LogUnit",
"MagUnit",
"DexUnit",
"DecibelUnit",
"LogQuantity",
"Magnitude",
"Decibel",
"Dex",
"STmag",
"ABmag",
"M_bol",
"m_bol",
]
class LogUnit(FunctionUnitBase):
"""Logarithmic unit containing a physical one.
Usually, logarithmic units are instantiated via specific subclasses
such `~astropy.units.MagUnit`, `~astropy.units.DecibelUnit`, and
`~astropy.units.DexUnit`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the logarithmic function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the logarithmic unit set by the subclass.
"""
# the four essential overrides of FunctionUnitBase
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return LogQuantity
def from_physical(self, x):
"""Transformation from value in physical to value in logarithmic units.
Used in equivalency.
"""
return dex.to(self._function_unit, np.log10(x))
def to_physical(self, x):
"""Transformation from value in logarithmic to value in physical units.
Used in equivalency.
"""
return 10 ** self._function_unit.to(dex, x)
# ^^^^ the four essential overrides of FunctionUnitBase
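    # Worked illustration (sketch, not used by the code): for a MagUnit such as
    # MagUnit(u.ct / u.s), the function unit is mag, so
    #     from_physical(100.) == dex.to(mag, 2.) == -5.
    #     to_physical(-5.)    == 10 ** mag.to(dex, -5.) == 100.
    # Similarly, DexUnit(u.cm / u.s**2) wraps a physical unit in dex.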
# add addition and subtraction, which imply multiplication/division of
# the underlying physical units
def _add_and_adjust_physical_unit(self, other, sign_self, sign_other):
"""Add/subtract LogUnit to/from another unit, and adjust physical unit.
self and other are multiplied by sign_self and sign_other, resp.
We wish to do: ±lu_1 + ±lu_2 -> lu_f (lu=logarithmic unit)
and pu_1^(±1) * pu_2^(±1) -> pu_f (pu=physical unit)
Raises
------
UnitsError
If function units are not equivalent.
"""
# First, insist on compatible logarithmic type. Here, plain u.mag,
# u.dex, and u.dB are OK, i.e., other does not have to be LogUnit
# (this will indirectly test whether other is a unit at all).
try:
getattr(other, "function_unit", other)._to(self._function_unit)
except AttributeError:
# if other is not a unit (i.e., does not have _to).
return NotImplemented
except UnitsError:
raise UnitsError(
"Can only add/subtract logarithmic units of compatible type."
)
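        # Sketch of the combination performed below: with sign_self=+1 and
        # sign_other=+1, mag(ct / s) + mag(s) yields mag(ct); with
        # sign_other=-1, mag(ct / s) - mag(ct) yields mag(1 / s).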
other_physical_unit = getattr(other, "physical_unit", dimensionless_unscaled)
physical_unit = CompositeUnit(
1, [self._physical_unit, other_physical_unit], [sign_self, sign_other]
)
return self._copy(physical_unit)
def __neg__(self):
return self._copy(self.physical_unit ** (-1))
def __add__(self, other):
# Only know how to add to a logarithmic unit with compatible type,
# be it a plain one (u.mag, etc.,) or another LogUnit
return self._add_and_adjust_physical_unit(other, +1, +1)
def __radd__(self, other):
return self._add_and_adjust_physical_unit(other, +1, +1)
def __sub__(self, other):
return self._add_and_adjust_physical_unit(other, +1, -1)
def __rsub__(self, other):
        # Here, in normal usage, other cannot be a LogUnit; only an equivalent
        # plain unit such as u.mag, u.dB, or u.dex. But we might as well use
        # the common routine.
return self._add_and_adjust_physical_unit(other, -1, +1)
class MagUnit(LogUnit):
"""Logarithmic physical units expressed in magnitudes.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the magnitude function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``mag``, but this allows one to use an equivalent
unit such as ``2 mag``.
"""
@property
def _default_function_unit(self):
return mag
@property
def _quantity_class(self):
return Magnitude
class DexUnit(LogUnit):
    """Logarithmic physical units expressed in dex.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the dex function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dex``, but this allows one to use an equivalent
unit such as ``0.5 dex``.
"""
@property
def _default_function_unit(self):
return dex
@property
def _quantity_class(self):
return Dex
def to_string(self, format="generic"):
if format == "cds":
if self.physical_unit == dimensionless_unscaled:
return "[-]" # by default, would get "[---]".
else:
return f"[{self.physical_unit.to_string(format=format)}]"
else:
            return super().to_string(format)
class DecibelUnit(LogUnit):
"""Logarithmic physical units expressed in dB.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the decibel function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, this is ``dB``, but this allows one to use an equivalent
unit such as ``2 dB``.
"""
@property
def _default_function_unit(self):
return dB
@property
def _quantity_class(self):
return Decibel
class LogQuantity(FunctionQuantity):
"""A representation of a (scaled) logarithm of a number with a unit.
Parameters
----------
value : number, `~astropy.units.Quantity`, `~astropy.units.LogQuantity`, or sequence of quantity-like.
The numerical value of the logarithmic quantity. If a number or
a `~astropy.units.Quantity` with a logarithmic unit, it will be
converted to ``unit`` and the physical unit will be inferred from
``unit``. If a `~astropy.units.Quantity` with just a physical unit,
        it will be converted to the logarithmic unit, after, if necessary,
converting it to the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.FunctionUnitBase`, optional
For an `~astropy.units.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The ``dtype`` of the resulting Numpy array or scalar that will
        hold the value. If not provided, it is determined automatically
from the input value.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
Examples
--------
Typically, use is made of an `~astropy.units.FunctionQuantity`
subclass, as in::
>>> import astropy.units as u
>>> u.Magnitude(-2.5)
<Magnitude -2.5 mag>
>>> u.Magnitude(10.*u.count/u.second)
<Magnitude -2.5 mag(ct / s)>
>>> u.Decibel(1.*u.W, u.DecibelUnit(u.mW)) # doctest: +FLOAT_CMP
<Decibel 30. dB(mW)>
"""
# only override of FunctionQuantity
_unit_class = LogUnit
# additions that work just for logarithmic units
def __add__(self, other):
# Add function units, thus multiplying physical units. If no unit is
# given, assume dimensionless_unscaled; this will give the appropriate
# exception in LogUnit.__add__.
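        # Sketch: (-2.5 * u.mag(u.ct / u.s)) + (5. * u.mag(u.s)) has unit
        # mag(ct) (physical units multiplied) and value 2.5 (function values
        # added).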
new_unit = self.unit + getattr(other, "unit", dimensionless_unscaled)
# Add actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view + getattr(other, "_function_view", other)
return self._new_view(result, new_unit)
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
new_unit = self.unit + getattr(other, "unit", dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view += getattr(other, "_function_view", other)
self._set_unit(new_unit)
return self
def __sub__(self, other):
# Subtract function units, thus dividing physical units.
new_unit = self.unit - getattr(other, "unit", dimensionless_unscaled)
# Subtract actual logarithmic values, rescaling, e.g., dB -> dex.
result = self._function_view - getattr(other, "_function_view", other)
return self._new_view(result, new_unit)
def __rsub__(self, other):
new_unit = self.unit.__rsub__(getattr(other, "unit", dimensionless_unscaled))
result = self._function_view.__rsub__(getattr(other, "_function_view", other))
# Ensure the result is in right function unit scale
# (with rsub, this does not have to be one's own).
result = result.to(new_unit.function_unit)
return self._new_view(result, new_unit)
def __isub__(self, other):
new_unit = self.unit - getattr(other, "unit", dimensionless_unscaled)
# Do calculation in-place using _function_view of array.
function_view = self._function_view
function_view -= getattr(other, "_function_view", other)
self._set_unit(new_unit)
return self
def __mul__(self, other):
# Multiply by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
# Multiplying a log means putting the factor into the exponent
# of the unit
new_physical_unit = self.unit.physical_unit**other
result = self.view(np.ndarray) * other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__mul__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit**other
function_view = self._function_view
function_view *= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__imul__(other)
def __truediv__(self, other):
# Divide by a float or a dimensionless quantity
if isinstance(other, numbers.Number):
# Dividing a log by a number means raising the physical unit to the
# reciprocal of that number (the divisor ends up in the exponent)
new_physical_unit = self.unit.physical_unit ** (1 / other)
result = self.view(np.ndarray) / other
return self._new_view(result, self.unit._copy(new_physical_unit))
else:
return super().__truediv__(other)
def __itruediv__(self, other):
if isinstance(other, numbers.Number):
new_physical_unit = self.unit.physical_unit ** (1 / other)
function_view = self._function_view
function_view /= other
self._set_unit(self.unit._copy(new_physical_unit))
return self
else:
return super().__itruediv__(other)
def __pow__(self, other):
# We check if this power is OK by applying it first to the unit.
try:
other = float(other)
except TypeError:
return NotImplemented
new_unit = self.unit**other
new_value = self.view(np.ndarray) ** other
return self._new_view(new_value, new_unit)
def __ilshift__(self, other):
try:
other = Unit(other)
except UnitTypeError:
return NotImplemented
if not isinstance(other, self._unit_class):
return NotImplemented
try:
factor = self.unit.physical_unit._to(other.physical_unit)
except UnitConversionError:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] += self.unit.from_physical(factor)
self._set_unit(other)
return self
# Methods that do not work for function units generally but are OK for
# logarithmic units as they imply differences and independence of
# physical unit.
def var(self, axis=None, dtype=None, out=None, ddof=0):
unit = self.unit.function_unit**2
return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof, unit=unit)
def std(self, axis=None, dtype=None, out=None, ddof=0):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof, unit=unit)
def ptp(self, axis=None, out=None):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.ptp, axis, out=out, unit=unit)
def diff(self, n=1, axis=-1):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.diff, n, axis, unit=unit)
def ediff1d(self, to_end=None, to_begin=None):
unit = self.unit._copy(dimensionless_unscaled)
return self._wrap_function(np.ediff1d, to_end, to_begin, unit=unit)
_supported_functions = FunctionQuantity._supported_functions | {
getattr(np, function) for function in ("var", "std", "ptp", "diff", "ediff1d")
}
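# Illustrative sketch (not part of this module): how the LogQuantity arithmetic
# defined above behaves. The values and names below are made up for demonstration.
def _example_logquantity_arithmetic():
    import astropy.units as u

    # Adding logarithmic quantities multiplies their physical units:
    rate = u.Magnitude(10.0 * u.ct / u.s)   # -2.5 mag(ct / s)
    texp = u.Magnitude(100.0 * u.s)         # -5.0 mag(s)
    total = rate + texp                     # -7.5 mag(ct)
    # Multiplying by a plain number raises the physical unit to that power:
    area = 2.0 * u.Magnitude(10.0 * u.m)    # -5.0 mag(m2)
    return total.physical, area.physical    # ~1000 ct, ~100 m2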
class Dex(LogQuantity):
_unit_class = DexUnit
class Decibel(LogQuantity):
_unit_class = DecibelUnit
class Magnitude(LogQuantity):
_unit_class = MagUnit
dex._function_unit_class = DexUnit
dB._function_unit_class = DecibelUnit
mag._function_unit_class = MagUnit
STmag = MagUnit(photometric.STflux)
STmag.__doc__ = "ST magnitude: STmag=-21.1 corresponds to 1 erg/s/cm2/A"
ABmag = MagUnit(photometric.ABflux)
ABmag.__doc__ = "AB magnitude: ABmag=-48.6 corresponds to 1 erg/s/cm2/Hz"
M_bol = MagUnit(photometric.Bol)
M_bol.__doc__ = (
f"Absolute bolometric magnitude: M_bol=0 corresponds to L_bol0={photometric.Bol.si}"
)
m_bol = MagUnit(photometric.bol)
m_bol.__doc__ = (
f"Apparent bolometric magnitude: m_bol=0 corresponds to f_bol0={photometric.bol.si}"
)
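# Quick check (illustrative only, not part of this module): converting physical
# fluxes to the photometric magnitude units defined above; numbers are approximate.
def _example_photometric_zero_points():
    import astropy.units as u

    ab = (3631.0 * u.Jy).to(u.ABmag)                        # ~0 mag(AB)
    st = (1.0 * u.erg / u.s / u.cm**2 / u.AA).to(u.STmag)   # ~-21.1 mag(ST)
    return ab, st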
|
a349c6d283a3f0a918be9f2d5e59b9c52c5649b9999fac435309b0fc2d40fa1c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from astropy.units import (
Quantity,
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
__all__ = ["FunctionUnitBase", "FunctionQuantity"]
SUPPORTED_UFUNCS = {
getattr(np.core.umath, ufunc)
for ufunc in (
"isfinite",
"isinf",
"isnan",
"sign",
"signbit",
"rint",
"floor",
"ceil",
"trunc",
"_ones_like",
"ones_like",
"positive",
)
if hasattr(np.core.umath, ufunc)
}
# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted
SUPPORTED_FUNCTIONS = {
getattr(np, function)
for function in ("clip", "trace", "mean", "min", "max", "round")
}
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
"""Abstract base class for function units.
Function units are functions containing a physical unit, such as dB(mW).
Most of the arithmetic operations on function units are defined in this
base class.
While instantiation is defined, this class should not be used directly.
Rather, subclasses should be used that override the abstract properties
`_default_function_unit` and `_quantity_class`, and the abstract methods
`from_physical`, and `to_physical`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the function unit set by the subclass.
"""
# ↓↓↓ the following four need to be set by subclasses
# Make this a property so we can ensure subclasses define it.
@property
@abstractmethod
def _default_function_unit(self):
"""Default function unit corresponding to the function.
This property should be overridden by subclasses, with, e.g.,
`~astropy.units.MagUnit` returning `~astropy.units.mag`.
"""
# This has to be a property because the function quantity will not be
# known at unit definition time, as it gets defined after.
@property
@abstractmethod
def _quantity_class(self):
"""Function quantity class corresponding to this function unit.
This property should be overridden by subclasses, with, e.g.,
`~astropy.units.MagUnit` returning `~astropy.units.Magnitude`.
"""
@abstractmethod
def from_physical(self, x):
"""Transformation from value in physical to value in function units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
@abstractmethod
def to_physical(self, x):
"""Transformation from value in function to value in physical units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
# ↑↑↑ the above four need to be set by subclasses
# have priority over arrays, regular units, and regular quantities
__array_priority__ = 30000
def __init__(self, physical_unit=None, function_unit=None):
if physical_unit is None:
physical_unit = dimensionless_unscaled
else:
physical_unit = Unit(physical_unit)
if not isinstance(physical_unit, UnitBase) or physical_unit.is_equivalent(
self._default_function_unit
):
raise UnitConversionError(f"{physical_unit} is not a physical unit.")
if function_unit is None:
function_unit = self._default_function_unit
else:
# any function unit should be equivalent to subclass default
function_unit = Unit(getattr(function_unit, "function_unit", function_unit))
if not function_unit.is_equivalent(self._default_function_unit):
raise UnitConversionError(
f"Cannot initialize '{self.__class__.__name__}' instance with "
f"function unit '{function_unit}', as it is not equivalent to "
f"default function unit '{self._default_function_unit}'."
)
self._physical_unit = physical_unit
self._function_unit = function_unit
def _copy(self, physical_unit=None):
"""Copy oneself, possibly with a different physical unit."""
if physical_unit is None:
physical_unit = self.physical_unit
return self.__class__(physical_unit, self.function_unit)
@property
def physical_unit(self):
return self._physical_unit
@property
def function_unit(self):
return self._function_unit
@property
def equivalencies(self):
"""List of equivalencies between function and physical units.
Uses the `from_physical` and `to_physical` methods.
"""
return [(self, self.physical_unit, self.to_physical, self.from_physical)]
# ↓↓↓ properties/methods required to behave like a unit
def decompose(self, bases=set()):
"""Copy the current unit with the physical unit decomposed.
For details, see `~astropy.units.UnitBase.decompose`.
"""
return self._copy(self.physical_unit.decompose(bases))
@property
def si(self):
"""Copy the current function unit with the physical unit in SI."""
return self._copy(self.physical_unit.si)
@property
def cgs(self):
"""Copy the current function unit with the physical unit in CGS."""
return self._copy(self.physical_unit.cgs)
def _get_physical_type_id(self):
"""Get physical type corresponding to physical unit."""
return self.physical_unit._get_physical_type_id()
@property
def physical_type(self):
"""Return the physical type of the physical unit (e.g., 'length')."""
return self.physical_unit.physical_type
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, string, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to the built-in equivalencies between the
function unit and the physical one, as well as possible global
defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
Use `None` to turn off any global equivalencies.
Returns
-------
bool
"""
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other_physical_unit = getattr(
other,
"physical_unit",
(
dimensionless_unscaled
if self.function_unit.is_equivalent(other)
else other
),
)
return self.physical_unit.is_equivalent(other_physical_unit, equivalencies)
def to(self, other, value=1.0, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : `~astropy.units.Unit`, `~astropy.units.FunctionUnitBase`, or str
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the specified unit.
If not provided, defaults to 1.0.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is meant to treat only equivalencies between different
physical units; the built-in equivalency between the function
unit and the physical one is automatically taken into account.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
"""
# conversion to one's own physical unit should be fastest
if other is self.physical_unit:
return self.to_physical(value)
other_function_unit = getattr(other, "function_unit", other)
if self.function_unit.is_equivalent(other_function_unit):
# when other is an equivalent function unit:
# first convert physical units to other's physical units
other_physical_unit = getattr(
other, "physical_unit", dimensionless_unscaled
)
if self.physical_unit != other_physical_unit:
value_other_physical = self.physical_unit.to(
other_physical_unit, self.to_physical(value), equivalencies
)
# make function unit again, in own system
value = self.from_physical(value_other_physical)
# convert possible difference in function unit (e.g., dex->dB)
return self.function_unit.to(other_function_unit, value)
else:
try:
# when other is not a function unit
return self.physical_unit.to(
other, self.to_physical(value), equivalencies
)
except UnitConversionError as e:
if self.function_unit == Unit("mag"):
# One can get to raw magnitudes via math that strips the dimensions off.
# Include extra information in the exception to remind users of this.
msg = "Did you perhaps subtract magnitudes so the unit got lost?"
e.args += (msg,)
raise e
else:
raise
def is_unity(self):
return False
def __eq__(self, other):
return self.physical_unit == getattr(
other, "physical_unit", dimensionless_unscaled
) and self.function_unit == getattr(other, "function_unit", other)
def __ne__(self, other):
return not self.__eq__(other)
def __rlshift__(self, other):
"""Unit conversion operator ``<<``."""
try:
return self._quantity_class(other, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __mul__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit * other
else:
raise UnitsError(
"Cannot multiply a function unit with a physical dimension "
"with any unit."
)
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit / other
else:
raise UnitsError(
"Cannot divide a function unit with a physical dimension "
"by any unit."
)
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(1.0 / other, unit=self)
except Exception:
return NotImplemented
def __rtruediv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return other / self.function_unit
else:
raise UnitsError(
"Cannot divide a function unit with a physical dimension "
"into any unit"
)
else:
# Don't know what to do with anything not like a unit.
return NotImplemented
def __pow__(self, power):
if power == 0:
return dimensionless_unscaled
elif power == 1:
return self._copy()
if self.physical_unit == dimensionless_unscaled:
return self.function_unit**power
raise UnitsError(
"Cannot raise a function unit with a physical dimension "
"to any power but 0 or 1."
)
def __pos__(self):
return self._copy()
def to_string(self, format="generic"):
"""
Output the unit in the given format as a string.
The physical unit is appended, within parentheses, to the function
unit, as in "dB(mW)", with both units set using the given format.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
if format not in ("generic", "unscaled", "latex", "latex_inline"):
raise ValueError(
f"Function units cannot be written in {format} "
"format. Only 'generic', 'unscaled', 'latex' and "
"'latex_inline' are supported."
)
self_str = self.function_unit.to_string(format)
pu_str = self.physical_unit.to_string(format)
if pu_str == "":
pu_str = "1"
if format.startswith("latex"):
# need to strip leading and trailing "$"
self_str += rf"$\mathrm{{\left( {pu_str[1:-1]} \right)}}$"
else:
self_str += f"({pu_str})"
return self_str
def __str__(self):
"""Return string representation for unit."""
self_str = str(self.function_unit)
pu_str = str(self.physical_unit)
if pu_str:
self_str += f"({pu_str})"
return self_str
def __repr__(self):
# By default, try to give a representation using `Unit(<string>)`,
# with string such that parsing it would give the correct FunctionUnit.
if callable(self.function_unit):
return f'Unit("{self.to_string()}")'
else:
return '{}("{}"{})'.format(
self.__class__.__name__,
self.physical_unit,
""
if self.function_unit is self._default_function_unit
else f', unit="{self.function_unit}"',
)
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return self.to_string("latex")
def __hash__(self):
return hash((self.function_unit, self.physical_unit))
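# Illustrative sketch (not part of this module): FunctionUnitBase in action via the
# concrete subclasses defined elsewhere in astropy.units (here DecibelUnit from u.dB).
def _example_function_unit_to():
    import astropy.units as u

    dBm = u.dB(u.mW)                   # a DecibelUnit with physical_unit = mW
    watts = dBm.to(u.W, 30.0)          # 30 dB(mW) -> 1.0 W, via to_physical
    dex_w = dBm.to(u.dex(u.W), 30.0)   # equivalent function unit -> 0.0 dex(W)
    return watts, dex_w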
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, quantity-like, or sequence thereof
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
If a `~astropy.units.Quantity` with just a physical unit, it will
be converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.FunctionUnitBase`, optional
For an `~astropy.units.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
"""
_unit_class = None
"""Default `~astropy.units.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# Convert possible string input to a (function) unit.
unit = Unit(unit)
if not isinstance(unit, FunctionUnitBase):
# By default, use value's physical unit.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# if iterable, see if first item has a unit
# (mixed lists fail in super call below).
try:
value_unit = value[0].unit
except Exception:
pass
physical_unit = getattr(value_unit, "physical_unit", value_unit)
unit = cls._unit_class(physical_unit, function_unit=unit)
# initialise!
return super().__new__(
cls,
value,
unit,
dtype=dtype,
copy=copy,
order=order,
subok=subok,
ndmin=ndmin,
)
# ↓↓↓ properties not found in Quantity
@property
def physical(self):
"""The physical quantity corresponding the function one."""
return self.to(self.unit.physical_unit)
@property
def _function_view(self):
"""View as Quantity with function unit, dropping the physical unit.
Use `~astropy.units.quantity.Quantity.value` for just the value.
"""
return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behavior
@property
def si(self):
"""Return a copy with the physical unit in SI units."""
return self.__class__(self.physical.si)
@property
def cgs(self):
"""Return a copy with the physical unit in CGS units."""
return self.__class__(self.physical.cgs)
def decompose(self, bases=[]):
"""Generate a new instance with the physical unit decomposed.
For details, see `~astropy.units.Quantity.decompose`.
"""
return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behavior
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if not isinstance(unit, self._unit_class):
# Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
try:
# "or 'nonsense'" ensures `None` breaks, just in case.
unit = self._unit_class(function_unit=unit or "nonsense")
except Exception:
raise UnitTypeError(
f"{type(self).__name__} instances require"
f" {self._unit_class.__name__} function units, so cannot set it to"
f" '{unit}'."
)
self._unit = unit
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# TODO: it would be more logical to have this in Quantity already,
# instead of in UFUNC_HELPERS, where it cannot be overridden.
# And really it should just return NotImplemented, since possibly
# another argument might know what to do.
if function not in self._supported_ufuncs:
raise UnitTypeError(
f"Cannot use ufunc '{function.__name__}' with function quantities"
)
return super().__array_ufunc__(function, method, *inputs, **kwargs)
def _maybe_new_view(self, result):
"""View as function quantity if the unit is unchanged.
Used for the case that self.unit.physical_unit is dimensionless,
where multiplication and division is done using the Quantity
equivalent, to transform them back to a FunctionQuantity if possible.
"""
if isinstance(result, Quantity) and result.unit == self.unit:
return self._new_view(result)
else:
return result
# ↓↓↓ methods overridden to change behavior
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view * other)
raise UnitTypeError(
"Cannot multiply function quantities which are not dimensionless "
"with anything."
)
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view / other)
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless by anything."
)
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view.__rtruediv__(other))
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless "
"into anything."
)
def _comparison(self, other, comparison_func):
"""Do a comparison between self and other, raising UnitsError when
other cannot be converted to self because it has a different physical
unit, and returning NotImplemented when there are other errors.
"""
try:
# will raise a UnitsError if physical units not equivalent
other_in_own_unit = self._to_own_unit(other, check_precision=False)
except UnitsError as exc:
if self.unit.physical_unit != dimensionless_unscaled:
raise exc
try:
other_in_own_unit = self._function_view._to_own_unit(
other, check_precision=False
)
except Exception:
raise exc
except Exception:
return NotImplemented
return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
def __lshift__(self, other):
"""Unit conversion operator `<<`."""
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(
arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, "unit") and hasattr(arg.unit, "physical_unit"))
):
args = tuple(getattr(arg, "_function_view", arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError(
f"Cannot use method that uses function '{function.__name__}' with "
"function quantities that are not dimensionless."
)
# Override functions that are supported but do not use _wrap_function
# in Quantity.
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out, keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(
np.clip, self._to_own_unit(a_min), self._to_own_unit(a_max), out=out
)
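# Illustrative sketch (not part of this module): FunctionQuantity behaviour via a
# concrete subclass (Magnitude); .physical and comparisons work as implemented above.
def _example_function_quantity():
    import astropy.units as u

    m = u.Magnitude(10.0 * u.ct / u.s)                 # <Magnitude -2.5 mag(ct / s)>
    rate = m.physical                                  # <Quantity 10. ct / s>
    is_brighter = m < u.Magnitude(1.0 * u.ct / u.s)    # True (smaller mag = brighter)
    return rate, is_brighter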
|
30d5980e90fe62d27003d93c43591973e66170c11ca32f4a57dc885563bda4dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import earth_orientation as earth
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.baseframe import base_doc, frame_transform_graph
from astropy.coordinates.representation import (
CartesianRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.transformations import (
DynamicMatrixTransform,
FunctionTransformWithFiniteDifference,
)
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame, doc_components
from .utils import EQUINOX_B1950
__all__ = ["FK4", "FK4NoETerms"]
doc_footer_fk4 = """
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
obstime : `~astropy.time.Time`
The time this frame was observed. If ``None``, will be the same as
``equinox``.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_fk4)
class FK4(BaseRADecFrame):
"""
A coordinate or frame in the FK4 system.
Note that this is a barycentric version of FK4 - that is, the origin for
this frame is the Solar System Barycenter, *not* the Earth geocenter.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_B1950)
obstime = TimeAttribute(default=None, secondary_attribute="equinox")
# the "self" transform
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4)
def fk4_to_fk4(fk4coord1, fk4frame2):
# deceptively complicated: need to transform to No E-terms FK4, precess, and
# then come back, because precession is non-trivial with E-terms
fnoe_w_eqx1 = fk4coord1.transform_to(FK4NoETerms(equinox=fk4coord1.equinox))
fnoe_w_eqx2 = fnoe_w_eqx1.transform_to(FK4NoETerms(equinox=fk4frame2.equinox))
return fnoe_w_eqx2.transform_to(fk4frame2)
@format_doc(base_doc, components=doc_components, footer=doc_footer_fk4)
class FK4NoETerms(BaseRADecFrame):
"""
A coordinate or frame in the FK4 system, but with the E-terms of aberration
removed.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_B1950)
obstime = TimeAttribute(default=None, secondary_attribute="equinox")
@staticmethod
def _precession_matrix(oldequinox, newequinox):
"""
Compute and return the precession matrix for FK4 using Newcomb's method.
Used inside some of the transformation functions.
Parameters
----------
oldequinox : `~astropy.time.Time`
The equinox to precess from.
newequinox : `~astropy.time.Time`
The equinox to precess to.
Returns
-------
newcoord : array
The precession matrix to transform to the new equinox
"""
return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear)
# the "self" transform
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK4NoETerms)
def fk4noe_to_fk4noe(fk4necoord1, fk4neframe2):
return fk4necoord1._precession_matrix(fk4necoord1.equinox, fk4neframe2.equinox)
# FK4-NO-E to/from FK4 ----------------------------->
# Unlike other frames, this module includes *two* frame classes for FK4
# coordinates - one including the E-terms of aberration (FK4), and
# one not including them (FK4NoETerms). The following functions
# implement the transformation between these two.
def fk4_e_terms(equinox):
"""
Return the e-terms of aberration vector.
Parameters
----------
equinox : Time object
The equinox for which to compute the e-terms
"""
# Constant of aberration at J2000; from Explanatory Supplement to the
# Astronomical Almanac (Seidelmann, 2005).
k = 0.0056932 # in degrees (v_earth/c ~ 1e-4 rad ~ 0.0057 deg)
k = np.radians(k)
# Eccentricity of the Earth's orbit
e = earth.eccentricity(equinox.jd)
# Mean longitude of perigee of the solar orbit
g = earth.mean_lon_of_perigee(equinox.jd)
g = np.radians(g)
# Obliquity of the ecliptic
o = earth.obliquity(equinox.jd, algorithm=1980)
o = np.radians(o)
return (
e * k * np.sin(g),
-e * k * np.cos(g) * np.cos(o),
-e * k * np.cos(g) * np.sin(o),
)
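# Quick check (illustrative only, not part of this module): the norm of the
# E-terms vector returned above is e * k, about 0.34 arcsec, independent of the
# angles g and o.
def _example_e_terms_magnitude():
    terms = u.Quantity(fk4_e_terms(EQUINOX_B1950), u.radian)
    return np.sqrt(np.sum(terms**2)).to(u.arcsec)   # ~0.34 arcsec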
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference, FK4, FK4NoETerms
)
def fk4_to_fk4_no_e(fk4coord, fk4noeframe):
# Extract cartesian vector
rep = fk4coord.cartesian
# Find distance (for re-normalization)
d_orig = rep.norm()
rep /= d_orig
# Apply E-terms of aberration. Note that this depends on the equinox (not
# the observing time/epoch) of the coordinates. See issue #1496 for a
# discussion of this.
eterms_a = CartesianRepresentation(
u.Quantity(fk4_e_terms(fk4coord.equinox), u.dimensionless_unscaled, copy=False),
copy=False,
)
rep = rep - eterms_a + eterms_a.dot(rep) * rep
# Find new distance (for re-normalization)
d_new = rep.norm()
# Renormalize
rep *= d_orig / d_new
# now re-cast into an appropriate Representation, and precess if need be
if isinstance(fk4coord.data, UnitSphericalRepresentation):
rep = rep.represent_as(UnitSphericalRepresentation)
# if no obstime was given in the new frame, use the old one for consistency
newobstime = (
fk4coord._obstime if fk4noeframe._obstime is None else fk4noeframe._obstime
)
fk4noe = FK4NoETerms(rep, equinox=fk4coord.equinox, obstime=newobstime)
if fk4coord.equinox != fk4noeframe.equinox:
# precession
fk4noe = fk4noe.transform_to(fk4noeframe)
return fk4noe
@frame_transform_graph.transform(
FunctionTransformWithFiniteDifference, FK4NoETerms, FK4
)
def fk4_no_e_to_fk4(fk4noecoord, fk4frame):
# first precess, if necessary
if fk4noecoord.equinox != fk4frame.equinox:
fk4noe_w_fk4equinox = FK4NoETerms(
equinox=fk4frame.equinox, obstime=fk4noecoord.obstime
)
fk4noecoord = fk4noecoord.transform_to(fk4noe_w_fk4equinox)
# Extract cartesian vector
rep = fk4noecoord.cartesian
# Find distance (for re-normalization)
d_orig = rep.norm()
rep /= d_orig
# Apply E-terms of aberration. Note that this depends on the equinox (not
# the observing time/epoch) of the coordinates. See issue #1496 for a
# discussion of this.
eterms_a = CartesianRepresentation(
u.Quantity(
fk4_e_terms(fk4noecoord.equinox), u.dimensionless_unscaled, copy=False
),
copy=False,
)
rep0 = rep.copy()
for _ in range(10):
rep = (eterms_a + rep0) / (1.0 + eterms_a.dot(rep))
# Find new distance (for re-normalization)
d_new = rep.norm()
# Renormalize
rep *= d_orig / d_new
# now re-cast into an appropriate Representation, and precess if need be
if isinstance(fk4noecoord.data, UnitSphericalRepresentation):
rep = rep.represent_as(UnitSphericalRepresentation)
return fk4frame.realize_frame(rep)
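# Illustrative sketch (not part of this module): the coordinate shift produced by
# removing the E-terms is at most ~0.34 arcsec; exact values depend on position.
# The position used here is made up.
def _example_fk4_vs_fk4noeterms():
    from astropy.coordinates import SkyCoord

    c = SkyCoord(ra=10.0 * u.deg, dec=20.0 * u.deg, frame="fk4", equinox="B1950")
    c_noe = c.transform_to(FK4NoETerms())
    dra = (c_noe.ra - c.ra).to(u.arcsec)
    ddec = (c_noe.dec - c.dec).to(u.arcsec)
    return dra, ddec   # each a fraction of an arcsecond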
|
f291400dd7058a145ec9f95c85296b1a38333fd9dfe3759739846d3f00cf9553 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.attributes import EarthLocationAttribute, TimeAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.coordinates.representation import (
CartesianDifferential,
CartesianRepresentation,
)
from astropy.utils.decorators import format_doc
from .utils import DEFAULT_OBSTIME, EARTH_CENTER
__all__ = ["ITRS"]
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Earth and its precession.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame. The default is the
centre of the Earth.
"""
@format_doc(base_doc, components="", footer=doc_footer)
class ITRS(BaseCoordinateFrame):
"""
A coordinate or frame in the International Terrestrial Reference System
(ITRS). This is approximately a geocentric system, although strictly it is
defined by a series of reference locations near the surface of the Earth (the ITRF).
For more background on the ITRS, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation.
This frame also includes frames that are defined *relative* to the center of the Earth,
but that are offset (in both position and velocity) from the center of the Earth. You
may see such non-geocentric coordinates referred to as "topocentric".
Topocentric ITRS frames are convenient for observations of near Earth objects where
stellar aberration is not included. One can merely subtract the observing site's
EarthLocation geocentric ITRS coordinates from the object's geocentric ITRS coordinates,
put the resulting vector into a topocentric ITRS frame and then transform to
`~astropy.coordinates.AltAz` or `~astropy.coordinates.HADec`. The other way around is
to transform an observed `~astropy.coordinates.AltAz` or `~astropy.coordinates.HADec`
position to a topocentric ITRS frame and add the observing site's EarthLocation geocentric
ITRS coordinates to yield the object's geocentric ITRS coordinates.
On the other hand, using ``transform_to`` to transform geocentric ITRS coordinates to
topocentric ITRS, observed `~astropy.coordinates.AltAz`, or observed
`~astropy.coordinates.HADec` coordinates includes the difference between stellar aberration
from the point of view of an observer at the geocenter and stellar aberration from the
point of view of an observer on the surface of the Earth. If the geocentric ITRS
coordinates of the object include stellar aberration at the geocenter (e.g. certain ILRS
ephemerides), then this is the way to go.
Note to ILRS ephemeris users: Astropy does not currently consider relativistic
effects of the Earth's gravitational field. Nor do the `~astropy.coordinates.AltAz`
or `~astropy.coordinates.HADec` refraction corrections compute the change in the
range due to the curved path of light through the atmosphere, so Astropy is no
substitute for the ILRS software in these respects.
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
location = EarthLocationAttribute(default=EARTH_CENTER)
@property
def earth_location(self):
"""
The data in this frame as an `~astropy.coordinates.EarthLocation` class.
"""
from astropy.coordinates.earth import EarthLocation
cart = self.represent_as(CartesianRepresentation)
return EarthLocation(
x=cart.x + self.location.x,
y=cart.y + self.location.y,
z=cart.z + self.location.z,
)
# Self-transform is in intermediate_rotation_transforms.py with all the other
# ITRS transforms
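# Illustrative sketch (not part of this module) of the topocentric ITRS recipe
# described in the class docstring. The positions are made up, and the final step
# assumes the direct topocentric ITRS->AltAz transform available in recent astropy;
# transforming to AltAz may trigger an IERS Earth-orientation data download.
def _example_topocentric_itrs():
    from astropy import units as u
    from astropy.coordinates import AltAz, EarthLocation
    from astropy.time import Time

    t = Time("2020-01-01T00:00:00")
    site = EarthLocation(lon=0.0 * u.deg, lat=52.0 * u.deg, height=100.0 * u.m)
    # geocentric ITRS position of a (made-up, roughly geostationary) object
    sat = ITRS(x=42164.0 * u.km, y=0.0 * u.km, z=0.0 * u.km, obstime=t)
    # subtract the site's geocentric ITRS position to get a topocentric vector ...
    topo_xyz = sat.cartesian - site.get_itrs(t).cartesian
    # ... and interpret it in a topocentric ITRS frame tied to the site
    topo = ITRS(topo_xyz, obstime=t, location=site)
    return topo.transform_to(AltAz(obstime=t, location=site))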
|
0725afa767d1bc586f37ef7f711a43876cbc91e621802b0cfde22647fd4027a1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions/values used repeatedly in different modules of
the ``builtin_frames`` package.
"""
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.representation import CartesianDifferential
from astropy.time import Time
from astropy.utils import iers
from astropy.utils.exceptions import AstropyWarning
# We use tt as the time scale for these equinoxes, primarily because it is the
# convention for J2000 (it is unclear if there is any "right answer" for B1950)
# while #8600 makes this the default behavior, we show it here to ensure it's
# clear which is used here
EQUINOX_J2000 = Time("J2000", scale="tt")
EQUINOX_B1950 = Time("B1950", scale="tt")
# This is a time object that is the default "obstime" when such an attribute is
# necessary. Currently, we use J2000.
DEFAULT_OBSTIME = Time("J2000", scale="tt")
# This is an EarthLocation that is the default "location" when such an attribute is
# necessary. It is the centre of the Earth.
EARTH_CENTER = EarthLocation(0 * u.km, 0 * u.km, 0 * u.km)
PIOVER2 = np.pi / 2.0
# comes from the mean of the 1962-2014 IERS B data
_DEFAULT_PM = (0.035, 0.29) * u.arcsec
def get_polar_motion(time):
"""
Gets the two polar motion components in radians for use with apio.
"""
# Get the polar motion from the IERS table
iers_table = iers.earth_orientation_table.get()
xp, yp, status = iers_table.pm_xy(time, return_status=True)
wmsg = (
"Tried to get polar motions for times {} IERS data is "
"valid. Defaulting to polar motion from the 50-yr mean for those. "
"This may affect precision at the arcsec level. Please check your "
"astropy.utils.iers.conf.iers_auto_url and point it to a newer "
"version if necessary."
)
if np.any(status == iers.TIME_BEFORE_IERS_RANGE):
xp[status == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0]
yp[status == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg.format("before"), AstropyWarning)
if np.any(status == iers.TIME_BEYOND_IERS_RANGE):
xp[status == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
yp[status == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg.format("after"), AstropyWarning)
return xp.to_value(u.radian), yp.to_value(u.radian)
def _warn_iers(ierserr):
"""
Generate a warning for an IERSRangeError.
Parameters
----------
ierserr : An `~astropy.utils.iers.IERSRangeError`
"""
msg = "{0} Assuming UT1-UTC=0 for coordinate transformations."
warnings.warn(msg.format(ierserr.args[0]), AstropyWarning)
def get_dut1utc(time):
"""
This function is used to get UT1-UTC in coordinates because normally it
gives an error outside the IERS range, but in coordinates we want to allow
it to go through, with a warning instead.
"""
try:
return time.delta_ut1_utc
except iers.IERSRangeError as e:
_warn_iers(e)
return np.zeros(time.shape)
def get_jd12(time, scale):
"""
Gets ``jd1`` and ``jd2`` from a time object in a particular scale.
Parameters
----------
time : `~astropy.time.Time`
The time to get the jds for
scale : str
The time scale to get the jds for
Returns
-------
jd1 : float
jd2 : float
"""
if time.scale == scale:
newtime = time
else:
try:
newtime = getattr(time, scale)
except iers.IERSRangeError as e:
_warn_iers(e)
newtime = time
return newtime.jd1, newtime.jd2
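# Quick check (illustrative only, not part of this module): the two-part Julian
# date of J2000.0 in the TT scale.
def _example_get_jd12():
    jd1, jd2 = get_jd12(Time("J2000", scale="tt"), "tt")
    return jd1, jd2   # (2451545.0, 0.0)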
def norm(p):
"""
Normalise a p-vector.
"""
return p / np.sqrt(np.einsum("...i,...i", p, p))[..., np.newaxis]
def pav2pv(p, v):
"""
Combine p- and v- vectors into a pv-vector.
"""
pv = np.empty(np.broadcast(p, v).shape[:-1], erfa.dt_pv)
pv["p"] = p
pv["v"] = v
return pv
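# Quick check (illustrative only, not part of this module): pav2pv packs matching
# p- and v- arrays into the structured dtype that the erfa routines expect.
def _example_pav2pv():
    p = np.array([1.0, 0.0, 0.0])
    v = np.array([0.0, 1.0e-2, 0.0])
    pv = pav2pv(p, v)
    return pv["p"], pv["v"]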
def get_cip(jd1, jd2):
"""
Find the X, Y coordinates of the CIP and the CIO locator, s.
Parameters
----------
jd1 : float or `np.ndarray`
First part of two part Julian date (TDB)
jd2 : float or `np.ndarray`
Second part of two part Julian date (TDB)
Returns
-------
x : float or `np.ndarray`
x coordinate of the CIP
y : float or `np.ndarray`
y coordinate of the CIP
s : float or `np.ndarray`
CIO locator, s
"""
# classical NPB matrix, IAU 2006/2000A
rpnb = erfa.pnm06a(jd1, jd2)
# CIP X, Y coordinates from array
x, y = erfa.bpn2xy(rpnb)
# CIO locator, s
s = erfa.s06(jd1, jd2, x, y)
return x, y, s
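# Quick check (illustrative only, not part of this module): CIP coordinates and
# the CIO locator at J2000.0; all three are very small numbers in radians.
def _example_get_cip():
    jd1, jd2 = get_jd12(Time("J2000", scale="tt"), "tdb")
    return get_cip(jd1, jd2)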
def aticq(srepr, astrom):
"""
A slightly modified version of the ERFA function ``eraAticq``.
``eraAticq`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
There are two issues with the version of aticq in ERFA. Both are associated
with the handling of light deflection.
The companion function ``eraAtciqz`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
In addition, ERFA's aticq assumes a distant source, so there is no difference between
the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
few arcseconds in the worst case (e.g., a Venus transit).
Parameters
----------
srepr : `~astropy.coordinates.SphericalRepresentation`
Astrometric GCRS or CIRS position of object from observer
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
-------
rc : float or `~numpy.ndarray`
Right Ascension in radians
dc : float or `~numpy.ndarray`
Declination in radians
"""
# ignore parallax effects if no distance, or far away
srepr_distance = srepr.distance
ignore_distance = srepr_distance.unit == u.one
# RA, Dec to cartesian unit vectors
pos = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
# Bias-precession-nutation, giving GCRS proper direction.
ppr = erfa.trxp(astrom["bpn"], pos)
# Aberration, giving GCRS natural direction
d = np.zeros_like(ppr)
for j in range(2):
before = norm(ppr - d)
after = erfa.ab(before, astrom["v"], astrom["em"], astrom["bm1"])
d = after - before
pnat = norm(ppr - d)
# Light deflection by the Sun, giving BCRS coordinate direction
d = np.zeros_like(pnat)
for j in range(5):
before = norm(pnat - d)
if ignore_distance:
# No distance to object, assume a long way away
q = before
else:
# Find BCRS direction of Sun to object.
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively.
eh = astrom["em"][..., np.newaxis] * astrom["eh"]
# unit vector from Sun to object
q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * before
sundist, q = erfa.pn(q)
sundist = sundist[..., np.newaxis]
# calculation above is extremely unstable very close to the sun
# in these situations, default back to ldsun-style behaviour,
# since this is reversible and drops to zero within stellar limb
q = np.where(sundist > 1.0e-10, q, before)
after = erfa.ld(1.0, before, q, astrom["eh"], astrom["em"], 1e-6)
d = after - before
pco = norm(pnat - d)
# ICRS astrometric RA, Dec
rc, dc = erfa.c2s(pco)
return erfa.anp(rc), dc
def atciqz(srepr, astrom):
"""
A slightly modified version of the ERFA function ``eraAtciqz``.
``eraAtciqz`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
There are two issues with the version of atciqz in ERFA. Both are associated
with the handling of light deflection.
The companion function ``eraAticq`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
In addition, ERFA's atciqz assumes a distant source, so there is no difference between
the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
few arcseconds in the worst case (e.g., a Venus transit).
Parameters
----------
srepr : `~astropy.coordinates.SphericalRepresentation`
Astrometric ICRS position of object from observer
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
-------
ri : float or `~numpy.ndarray`
Right Ascension in radians
di : float or `~numpy.ndarray`
Declination in radians
"""
# ignore parallax effects if no distance, or far away
srepr_distance = srepr.distance
ignore_distance = srepr_distance.unit == u.one
# BCRS coordinate direction (unit vector).
pco = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
# Find BCRS direction of Sun to object
if ignore_distance:
# No distance to object, assume a long way away
q = pco
else:
# Find BCRS direction of Sun to object.
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively.
eh = astrom["em"][..., np.newaxis] * astrom["eh"]
# unit vector from Sun to object
q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * pco
sundist, q = erfa.pn(q)
sundist = sundist[..., np.newaxis]
# calculation above is extremely unstable very close to the sun
# in these situations, default back to ldsun-style behaviour,
# since this is reversible and drops to zero within stellar limb
q = np.where(sundist > 1.0e-10, q, pco)
# Light deflection by the Sun, giving BCRS natural direction.
pnat = erfa.ld(1.0, pco, q, astrom["eh"], astrom["em"], 1e-6)
# Aberration, giving GCRS proper direction.
ppr = erfa.ab(pnat, astrom["v"], astrom["em"], astrom["bm1"])
# Bias-precession-nutation, giving CIRS proper direction.
# Has no effect if matrix is identity matrix, in which case gives GCRS ppr.
pi = erfa.rxp(astrom["bpn"], ppr)
# CIRS (GCRS) RA, Dec
ri, di = erfa.c2s(pi)
return erfa.anp(ri), di
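# Illustrative sketch (not part of this module): atciqz and aticq are (near-)
# inverses of each other. The astrom context here comes from erfa.apci13, which
# needs no external data; the position and date are made up.
def _example_atciqz_aticq_roundtrip():
    from astropy.coordinates import SphericalRepresentation

    astrom, _eo = erfa.apci13(*get_jd12(Time("2020-01-01", scale="tt"), "tdb"))
    srepr = SphericalRepresentation(10.0 * u.deg, 20.0 * u.deg, 1.0 * u.one)
    ra, dec = atciqz(srepr, astrom)
    back = SphericalRepresentation(ra * u.rad, dec * u.rad, 1.0 * u.one)
    return aticq(back, astrom)   # ~ (10 deg, 20 deg), expressed in radians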
def prepare_earth_position_vel(time):
"""
Get barycentric position and velocity, and heliocentric position of Earth.
Parameters
----------
time : `~astropy.time.Time`
time at which to calculate position and velocity of Earth
Returns
-------
earth_pv : `np.ndarray`
Barycentric position and velocity of Earth, in au and au/day
earth_helio : `np.ndarray`
Heliocentric position of Earth in au
"""
# this goes here to avoid circular import errors
from astropy.coordinates.solar_system import (
get_body_barycentric,
get_body_barycentric_posvel,
solar_system_ephemeris,
)
# get barycentric position and velocity of earth
ephemeris = solar_system_ephemeris.get()
# if we are using the builtin erfa based ephemeris,
# we can use the fact that epv00 already provides all we need.
# This avoids calling epv00 twice, once
# in get_body_barycentric_posvel('earth') and once in
# get_body_barycentric('sun')
if ephemeris == "builtin":
jd1, jd2 = get_jd12(time, "tdb")
earth_pv_heliocentric, earth_pv = erfa.epv00(jd1, jd2)
earth_heliocentric = earth_pv_heliocentric["p"]
# all other ephemeris providers probably don't have a shortcut like this
else:
earth_p, earth_v = get_body_barycentric_posvel("earth", time)
# get heliocentric position of earth, preparing it for passing to erfa.
sun = get_body_barycentric("sun", time)
earth_heliocentric = (earth_p - sun).get_xyz(xyz_axis=-1).to_value(u.au)
# Also prepare earth_pv for passing to erfa, which wants it as
# a structured dtype.
earth_pv = pav2pv(
earth_p.get_xyz(xyz_axis=-1).to_value(u.au),
earth_v.get_xyz(xyz_axis=-1).to_value(u.au / u.d),
)
return earth_pv, earth_heliocentric
def get_offset_sun_from_barycenter(time, include_velocity=False, reverse=False):
"""
Returns the offset of the Sun center from the solar-system barycenter (SSB).
Parameters
----------
time : `~astropy.time.Time`
Time at which to calculate the offset
include_velocity : `bool`
If ``True``, attach the velocity as a differential. Defaults to ``False``.
reverse : `bool`
If ``True``, return the offset of the barycenter from the Sun. Defaults to ``False``.
Returns
-------
`~astropy.coordinates.CartesianRepresentation`
The offset
"""
if include_velocity:
# Import here to avoid a circular import
from astropy.coordinates.solar_system import get_body_barycentric_posvel
offset_pos, offset_vel = get_body_barycentric_posvel("sun", time)
if reverse:
offset_pos, offset_vel = -offset_pos, -offset_vel
offset_vel = offset_vel.represent_as(CartesianDifferential)
offset_pos = offset_pos.with_differentials(offset_vel)
else:
# Import here to avoid a circular import
from astropy.coordinates.solar_system import get_body_barycentric
offset_pos = get_body_barycentric("sun", time)
if reverse:
offset_pos = -offset_pos
return offset_pos
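# Quick check (illustrative only, not part of this module): the Sun-SSB offset
# with the default (builtin) ephemeris; its magnitude is of order 0.01 au.
def _example_sun_offset():
    offset = get_offset_sun_from_barycenter(Time("J2000", scale="tt"))
    return offset.norm().to(u.au)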
|
afe89d642143be65ed8f3e3d541b3c881dc447329519cace29d09a0fafe6f6da | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting from ICRS/HCRS to CIRS and
anything in between (currently that means GCRS).
"""
import numpy as np
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.erfa_astrom import erfa_astrom
from astropy.coordinates.representation import (
CartesianRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.transformations import (
AffineTransform,
FunctionTransformWithFiniteDifference,
)
from .cirs import CIRS
from .gcrs import GCRS
from .hcrs import HCRS
from .icrs import ICRS
from .utils import atciqz, aticq, get_offset_sun_from_barycenter
# First the ICRS/CIRS related transforms
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, CIRS)
def icrs_to_cirs(icrs_coo, cirs_frame):
# first set up the astrometry context for ICRS<->CIRS
astrom = erfa_astrom.get().apco(cirs_frame)
if (
icrs_coo.data.get_name() == "unitspherical"
or icrs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just do the infinite-distance/no parallax calculation
srepr = icrs_coo.spherical
cirs_ra, cirs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = UnitSphericalRepresentation(
lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
copy=False,
)
else:
# When there is a distance, we first offset for parallax to get the
# astrometric coordinate direction and *then* run the ERFA transform for
# no parallax/PM. This ensures reversibility and is more sensible for
# inside solar system objects
astrom_eb = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
cirs_ra, cirs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = SphericalRepresentation(
lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False,
)
return cirs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ICRS)
def cirs_to_icrs(cirs_coo, icrs_frame):
# set up the astrometry context for ICRS<->cirs and then convert to
# astrometric coordinate direction
astrom = erfa_astrom.get().apco(cirs_coo)
srepr = cirs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
if (
cirs_coo.data.get_name() == "unitspherical"
or cirs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(
lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False,
)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_cirs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(
lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False,
)
astrom_eb = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
# Now the GCRS-related transforms to/from ICRS
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, GCRS)
def icrs_to_gcrs(icrs_coo, gcrs_frame):
# first set up the astrometry context for ICRS<->GCRS.
astrom = erfa_astrom.get().apcs(gcrs_frame)
if (
icrs_coo.data.get_name() == "unitspherical"
or icrs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just do the infinite-distance/no parallax calculation
srepr = icrs_coo.represent_as(SphericalRepresentation)
gcrs_ra, gcrs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = UnitSphericalRepresentation(
lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
copy=False,
)
else:
# When there is a distance, we first offset for parallax to get the
# BCRS coordinate direction and *then* run the ERFA transform for no
# parallax/PM. This ensures reversibility and is more sensible for
# inside solar system objects
astrom_eb = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
gcrs_ra, gcrs_dec = atciqz(srepr.without_differentials(), astrom)
newrep = SphericalRepresentation(
lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False,
)
return gcrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, ICRS)
def gcrs_to_icrs(gcrs_coo, icrs_frame):
# set up the astrometry context for ICRS<->GCRS and then convert to BCRS
# coordinate direction
astrom = erfa_astrom.get().apcs(gcrs_coo)
srepr = gcrs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
if (
gcrs_coo.data.get_name() == "unitspherical"
or gcrs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(
lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False,
)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_gcrs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(
lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False,
)
astrom_eb = CartesianRepresentation(
astrom["eb"], unit=u.au, xyz_axis=-1, copy=False
)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, HCRS)
def gcrs_to_hcrs(gcrs_coo, hcrs_frame):
if np.any(gcrs_coo.obstime != hcrs_frame.obstime):
# if the GCRS obstime and HCRS obstime are not the same, we first
# have to move to a GCRS where they are.
frameattrs = gcrs_coo.get_frame_attr_defaults()
frameattrs["obstime"] = hcrs_frame.obstime
gcrs_coo = gcrs_coo.transform_to(GCRS(**frameattrs))
# set up the astrometry context for ICRS<->GCRS and then convert to ICRS
# coordinate direction
astrom = erfa_astrom.get().apcs(gcrs_coo)
srepr = gcrs_coo.represent_as(SphericalRepresentation)
i_ra, i_dec = aticq(srepr.without_differentials(), astrom)
# convert to Quantity objects
i_ra = u.Quantity(i_ra, u.radian, copy=False)
i_dec = u.Quantity(i_dec, u.radian, copy=False)
if (
gcrs_coo.data.get_name() == "unitspherical"
or gcrs_coo.data.to_cartesian().x.unit == u.one
):
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=i_dec, lon=i_ra, copy=False)
else:
# When there is a distance, apply the parallax/offset to the
# Heliocentre as the last step to ensure round-tripping with the
# hcrs_to_gcrs transform
# Note that the distance in intermedrep is *not* a real distance as it
# does not include the offset back to the Heliocentre
intermedrep = SphericalRepresentation(
lat=i_dec, lon=i_ra, distance=srepr.distance, copy=False
)
        # astrom['eh'] and astrom['em'] contain the Sun-to-observer unit vector
        # and distance, respectively. Their shapes are (X, 3) and (X), where (X)
        # is the shape resulting from broadcasting the shape of the times object
        # against the shape of the pv array.
# broadcast em to eh and scale eh
eh = astrom["eh"] * astrom["em"][..., np.newaxis]
eh = CartesianRepresentation(eh, unit=u.au, xyz_axis=-1, copy=False)
newrep = intermedrep.to_cartesian() + eh
return hcrs_frame.realize_frame(newrep)
_NEED_ORIGIN_HINT = (
"The input {0} coordinates do not have length units. This probably means you"
" created coordinates with lat/lon but no distance. Heliocentric<->ICRS transforms"
" cannot function in this case because there is an origin shift."
)
@frame_transform_graph.transform(AffineTransform, HCRS, ICRS)
def hcrs_to_icrs(hcrs_coo, icrs_frame):
# this is just an origin translation so without a distance it cannot go ahead
if isinstance(hcrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(hcrs_coo.__class__.__name__))
return None, get_offset_sun_from_barycenter(
hcrs_coo.obstime, include_velocity=bool(hcrs_coo.data.differentials)
)
@frame_transform_graph.transform(AffineTransform, ICRS, HCRS)
def icrs_to_hcrs(icrs_coo, hcrs_frame):
# this is just an origin translation so without a distance it cannot go ahead
if isinstance(icrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__))
return None, get_offset_sun_from_barycenter(
hcrs_frame.obstime,
reverse=True,
include_velocity=bool(icrs_coo.data.differentials),
)
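# Illustrative sketch added by the editor (not part of the upstream module):
# because HCRS <-> ICRS is a pure origin translation, a coordinate without a
# distance cannot be transformed and raises the UnitsError built from
# _NEED_ORIGIN_HINT above. The helper name below is hypothetical and exists
# only to demonstrate the behaviour.
def _example_hcrs_origin_shift():
    import astropy.units as u
    from astropy.coordinates import HCRS, ICRS

    # With a distance, the Sun->barycenter offset can be applied.
    ok = HCRS(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.au).transform_to(ICRS())
    # Without a distance, the origin shift is ill-defined and raises.
    try:
        HCRS(ra=10 * u.deg, dec=20 * u.deg).transform_to(ICRS())
    except u.UnitsError:
        pass  # expected, see hcrs_to_icrs above
    return ok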
# Create loopback transformations
frame_transform_graph._add_merged_transform(CIRS, ICRS, CIRS)
# The CIRS<->CIRS transform going through ICRS has the subtle implication
# that a point in CIRS is uniquely determined by the corresponding
# astrometric ICRS coordinate *at its current time*. This has some subtle
# consequences in terms of GR, but is glossed over in the current scheme
# because we are dropping distances anyway.
frame_transform_graph._add_merged_transform(GCRS, ICRS, GCRS)
frame_transform_graph._add_merged_transform(HCRS, ICRS, HCRS)
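# Illustrative sketch added by the editor (not part of the upstream module):
# a minimal demonstration of the distance-dependent branches in icrs_to_gcrs
# above. A coordinate without a distance takes the infinite-distance/no-parallax
# path, while one with a distance is offset for parallax first; both round-trip
# back to ICRS. The helper name is hypothetical and purely illustrative.
def _example_icrs_gcrs_parallax():
    import astropy.units as u
    from astropy.coordinates import GCRS, ICRS
    from astropy.time import Time

    gcrs_frame = GCRS(obstime=Time("J2020.0"))
    far = ICRS(ra=10 * u.deg, dec=20 * u.deg)  # no distance -> no parallax
    near = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=1 * u.au)
    far_gcrs = far.transform_to(gcrs_frame)
    near_gcrs = near.transform_to(gcrs_frame)
    # Round-tripping recovers the original ICRS coordinate to high precision.
    roundtrip = near_gcrs.transform_to(ICRS())
    return far_gcrs, near_gcrs, roundtrip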
|
a9ef0fe821d335e3d4e4bf9204a396c37408ec24a19541ab9b08192b6cb3e484 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates.attributes import CoordinateAttribute, QuantityAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import (
DynamicMatrixTransform,
FunctionTransform,
)
_skyoffset_cache = {}
def make_skyoffset_cls(framecls):
"""
Create a new class that is the sky offset frame for a specific class of
origin frame. If such a class has already been created for this frame, the
same class will be returned.
The new class will always have component names for spherical coordinates of
``lon``/``lat``.
Parameters
----------
framecls : `~astropy.coordinates.BaseCoordinateFrame` subclass
The class to create the SkyOffsetFrame of.
Returns
-------
skyoffsetframecls : class
The class for the new skyoffset frame.
Notes
-----
    This function is necessary because Astropy's frame transformations depend
    on connections between specific frame *classes*. So each type of frame
    needs its own distinct skyoffset frame class. This function generates
    just that class, and also ensures that only one such class is actually
    created in any given Python session.
"""
if framecls in _skyoffset_cache:
return _skyoffset_cache[framecls]
# Create a new SkyOffsetFrame subclass for this frame class.
name = "SkyOffset" + framecls.__name__
_SkyOffsetFramecls = type(
name,
(SkyOffsetFrame, framecls),
{
"origin": CoordinateAttribute(frame=framecls, default=None),
# The following two have to be done because otherwise we use the
# defaults of SkyOffsetFrame set by BaseCoordinateFrame.
"_default_representation": framecls._default_representation,
"_default_differential": framecls._default_differential,
"__doc__": SkyOffsetFrame.__doc__,
},
)
@frame_transform_graph.transform(
FunctionTransform, _SkyOffsetFramecls, _SkyOffsetFramecls
)
def skyoffset_to_skyoffset(from_skyoffset_coord, to_skyoffset_frame):
"""Transform between two skyoffset frames."""
# This transform goes through the parent frames on each side.
# from_frame -> from_frame.origin -> to_frame.origin -> to_frame
tmp_from = from_skyoffset_coord.transform_to(from_skyoffset_coord.origin)
tmp_to = tmp_from.transform_to(to_skyoffset_frame.origin)
return tmp_to.transform_to(to_skyoffset_frame)
@frame_transform_graph.transform(
DynamicMatrixTransform, framecls, _SkyOffsetFramecls
)
def reference_to_skyoffset(reference_frame, skyoffset_frame):
"""Convert a reference coordinate to an sky offset frame."""
# Define rotation matrices along the position angle vector, and
# relative to the origin.
origin = skyoffset_frame.origin.spherical
return (
rotation_matrix(-skyoffset_frame.rotation, "x")
@ rotation_matrix(-origin.lat, "y")
@ rotation_matrix(origin.lon, "z")
)
@frame_transform_graph.transform(
DynamicMatrixTransform, _SkyOffsetFramecls, framecls
)
def skyoffset_to_reference(skyoffset_coord, reference_frame):
"""Convert an sky offset frame coordinate to the reference frame."""
# use the forward transform, but just invert it
R = reference_to_skyoffset(reference_frame, skyoffset_coord)
# transpose is the inverse because R is a rotation matrix
return matrix_transpose(R)
_skyoffset_cache[framecls] = _SkyOffsetFramecls
return _SkyOffsetFramecls
class SkyOffsetFrame(BaseCoordinateFrame):
"""
    A frame which is relative to some specific position and oriented to match
    the frame of that position.
SkyOffsetFrames always have component names for spherical coordinates
of ``lon``/``lat``, *not* the component names for the frame of ``origin``.
This is useful for calculating offsets and dithers in the frame of the sky
relative to an arbitrary position. Coordinates in this frame are both centered on the position specified by the
``origin`` coordinate, *and* they are oriented in the same manner as the
``origin`` frame. E.g., if ``origin`` is `~astropy.coordinates.ICRS`, this
object's ``lat`` will be pointed in the direction of Dec, while ``lon``
will point in the direction of RA.
For more on skyoffset frames, see :ref:`astropy:astropy-skyoffset-frames`.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
origin : coordinate-like
The coordinate which specifies the origin of this frame. Note that this
origin is used purely for on-sky location/rotation. It can have a
``distance`` but it will not be used by this ``SkyOffsetFrame``.
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Notes
-----
``SkyOffsetFrame`` is a factory class. That is, the objects that it
yields are *not* actually objects of class ``SkyOffsetFrame``. Instead,
distinct classes are created on-the-fly for whatever the frame class is
of ``origin``.
"""
rotation = QuantityAttribute(default=0, unit=u.deg)
origin = CoordinateAttribute(default=None, frame=None)
def __new__(cls, *args, **kwargs):
# We don't want to call this method if we've already set up
        # a skyoffset frame for this class.
if not (issubclass(cls, SkyOffsetFrame) and cls is not SkyOffsetFrame):
# We get the origin argument, and handle it here.
try:
origin_frame = kwargs["origin"]
except KeyError:
raise TypeError(
"Can't initialize a SkyOffsetFrame without origin= keyword."
)
if hasattr(origin_frame, "frame"):
origin_frame = origin_frame.frame
newcls = make_skyoffset_cls(origin_frame.__class__)
return newcls.__new__(newcls, *args, **kwargs)
# http://stackoverflow.com/questions/19277399/why-does-object-new-work-differently-in-these-three-cases
# See above for why this is necessary. Basically, because some child
# may override __new__, we must override it here to never pass
# arguments to the object.__new__ method.
if super().__new__ is object.__new__:
return super().__new__(cls)
return super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.origin is not None and not self.origin.has_data:
raise ValueError("The origin supplied to SkyOffsetFrame has no data.")
if self.has_data:
self._set_skyoffset_data_lon_wrap_angle(self.data)
@staticmethod
def _set_skyoffset_data_lon_wrap_angle(data):
if hasattr(data, "lon"):
data.lon.wrap_angle = 180.0 * u.deg
return data
def represent_as(self, base, s="base", in_frame_units=False):
"""
        Ensure the correct longitude wrap angle for any spherical
        representation.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_skyoffset_data_lon_wrap_angle(data)
return data
def __reduce__(self):
return (_skyoffset_reducer, (self.origin,), self.__dict__)
def _skyoffset_reducer(origin):
return SkyOffsetFrame.__new__(SkyOffsetFrame, origin=origin)
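# Illustrative sketch added by the editor (not part of the upstream module):
# expressing one position as an on-sky offset from another. The frame class
# actually instantiated is the dynamically generated SkyOffsetICRS (see
# make_skyoffset_cls above); its ``lon``/``lat`` components are offsets along
# RA/Dec at the origin. The helper name is hypothetical and purely illustrative.
def _example_skyoffset_usage():
    import astropy.units as u
    from astropy.coordinates import ICRS, SkyCoord

    center = ICRS(ra=10 * u.deg, dec=45 * u.deg)
    target = SkyCoord(ra=11 * u.deg, dec=46 * u.deg, frame="icrs")
    offset = target.transform_to(SkyOffsetFrame(origin=center))
    # lon wraps at 180 deg, as enforced by _set_skyoffset_data_lon_wrap_angle.
    return offset.lon, offset.lat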
|
45027909fa8e86331b8db6207f39616f78ba6f276a8a30754e717a5c18b8977e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk4 import FK4NoETerms
from .fk5 import FK5
from .utils import EQUINOX_B1950, EQUINOX_J2000
# FK5 to/from FK4 ------------------->
# B1950->J2000 matrix from Murray 1989 A&A 218,325 eqn 28
_B1950_TO_J2000_M = np.array(
[
[0.9999256794956877, -0.0111814832204662, -0.0048590038153592],
[0.0111814832391717, +0.9999374848933135, -0.0000271625947142],
[0.0048590037723143, -0.0000271702937440, +0.9999881946023742],
]
)
_FK4_CORR = (
np.array(
[
[-0.0026455262, -1.1539918689, +2.1111346190],
[+1.1540628161, -0.0129042997, +0.0236021478],
[-2.1112979048, -0.0056024448, +0.0102587734],
]
)
* 1.0e-6
)
def _fk4_B_matrix(obstime):
"""
This is a correction term in the FK4 transformations because FK4 is a
rotating system - see Murray 89 eqn 29.
"""
    # Note this is a *Julian century*, not Besselian
T = (obstime.jyear - 1950.0) / 100.0
if getattr(T, "shape", ()):
# Ensure we broadcast possibly arrays of times properly.
T.shape += (1, 1)
return _B1950_TO_J2000_M + _FK4_CORR * T
# This transformation can't be static because the observation date is needed.
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK5)
def fk4_no_e_to_fk5(fk4noecoord, fk5frame):
# Correction terms for FK4 being a rotating system
B = _fk4_B_matrix(fk4noecoord.obstime)
    # construct both precession matrices - if the equinoxes are B1950 and
    # J2000, these are just identity matrices
pmat1 = fk4noecoord._precession_matrix(fk4noecoord.equinox, EQUINOX_B1950)
pmat2 = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
return pmat2 @ B @ pmat1
# This transformation can't be static because the observation date is needed.
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK4NoETerms)
def fk5_to_fk4_no_e(fk5coord, fk4noeframe):
    # Get the transposed version of the rotating correction terms... so with
    # the transpose this takes us from FK5/J2000 to FK4/B1950
B = matrix_transpose(_fk4_B_matrix(fk4noeframe.obstime))
    # construct both precession matrices - if the equinoxes are B1950 and
    # J2000, these are just identity matrices
pmat1 = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
pmat2 = fk4noeframe._precession_matrix(EQUINOX_B1950, fk4noeframe.equinox)
return pmat2 @ B @ pmat1
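# Illustrative sketch added by the editor (not part of the upstream module):
# the obstime dependence of the FK4NoETerms <-> FK5 transform. Because FK4 is
# a slowly rotating system, the same FK4 coordinates observed at different
# epochs map to slightly different FK5 positions via _fk4_B_matrix above.
# The helper name is hypothetical and purely illustrative.
def _example_fk4_obstime_dependence():
    import astropy.units as u
    from astropy.coordinates import FK5, FK4NoETerms

    fk5 = FK5(equinox="J2000")
    c1950 = FK4NoETerms(ra=10 * u.deg, dec=20 * u.deg, obstime="B1950")
    c1975 = FK4NoETerms(ra=10 * u.deg, dec=20 * u.deg, obstime="B1975")
    # The two results differ slightly because of the _FK4_CORR * T term.
    return c1950.transform_to(fk5), c1975.transform_to(fk5)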
|
74ff7cdd4603070b44c5fd22248895a8be347c0e203463be761c2ad10c6d9b76 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import DifferentialAttribute
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping,
base_doc,
frame_transform_graph,
)
from astropy.coordinates.transformations import AffineTransform
from astropy.time import Time
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame
from .baseradec import doc_components as doc_components_radec
from .galactic import Galactic
from .icrs import ICRS
# For speed
J2000 = Time("J2000")
v_bary_Schoenrich2010 = r.CartesianDifferential([11.1, 12.24, 7.25] * u.km / u.s)
__all__ = ["LSR", "GalacticLSR", "LSRK", "LSRD"]
doc_footer_lsr = """
Other parameters
----------------
v_bary : `~astropy.coordinates.CartesianDifferential`
The velocity of the solar system barycenter with respect to the LSR, in
Galactic cartesian velocity components.
"""
@format_doc(base_doc, components=doc_components_radec, footer=doc_footer_lsr)
class LSR(BaseRADecFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR).
This coordinate frame is axis-aligned and co-spatial with
`~astropy.coordinates.ICRS`, but has a velocity offset relative to the
solar system barycenter to remove the peculiar motion of the sun relative
to the LSR. Roughly, the LSR is the mean velocity of the stars in the solar
    neighborhood, but the precise definition depends on the study. As
defined in Schönrich et al. (2010): "The LSR is the rest frame at the
location of the Sun of a star that would be on a circular orbit in the
gravitational potential one would obtain by azimuthally averaging away
non-axisymmetric features in the actual Galactic potential." No such orbit
truly exists, but it is still a commonly used velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
# frame attributes:
v_bary = DifferentialAttribute(
default=v_bary_Schoenrich2010, allowed_classes=[r.CartesianDifferential]
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSR)
def icrs_to_lsr(icrs_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_coord)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, LSR, ICRS)
def lsr_to_icrs(lsr_coord, icrs_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_frame)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=-v_offset)
return None, offset
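# Illustrative sketch added by the editor (not part of the upstream module):
# ICRS <-> LSR is a pure velocity offset, so positions are unchanged while
# radial velocities shift by the projection of ``v_bary`` onto the line of
# sight. The helper name is hypothetical and purely illustrative.
def _example_icrs_to_lsr():
    import astropy.units as u
    from astropy.coordinates import ICRS, LSR

    star = ICRS(
        ra=10 * u.deg,
        dec=20 * u.deg,
        distance=100 * u.pc,
        pm_ra_cosdec=0 * u.mas / u.yr,
        pm_dec=0 * u.mas / u.yr,
        radial_velocity=0 * u.km / u.s,
    )
    # The LSR radial velocity is nonzero even though the ICRS value is zero,
    # reflecting the solar peculiar motion v_bary_Schoenrich2010 defined above.
    return star.transform_to(LSR()).radial_velocity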
# ------------------------------------------------------------------------------
doc_components_gal = """
l : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
(``representation`` must be None).
pm_l_cosb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components_gal, footer=doc_footer_lsr)
class GalacticLSR(BaseCoordinateFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR), axis-aligned
to the Galactic frame.
This coordinate frame is axis-aligned and co-spatial with
    `~astropy.coordinates.Galactic`, but has a velocity offset relative to the
solar system barycenter to remove the peculiar motion of the sun relative
to the LSR. Roughly, the LSR is the mean velocity of the stars in the solar
    neighborhood, but the precise definition depends on the study. As
defined in Schönrich et al. (2010): "The LSR is the rest frame at the
location of the Sun of a star that would be on a circular orbit in the
gravitational potential one would obtain by azimuthally averaging away
non-axisymmetric features in the actual Galactic potential." No such orbit
truly exists, but it is still a commonly used velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "l"),
RepresentationMapping("lat", "b"),
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# frame attributes:
v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010)
@frame_transform_graph.transform(AffineTransform, Galactic, GalacticLSR)
def galactic_to_galacticlsr(galactic_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, GalacticLSR, Galactic)
def galacticlsr_to_galactic(lsr_coord, galactic_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0] * u.au, differentials=-v_offset)
return None, offset
# ------------------------------------------------------------------------------
# The LSRK velocity frame, defined as having a velocity of 20 km/s towards
# RA=270 Dec=30 (B1900) relative to the solar system Barycenter. This is defined
# in:
#
# Gordon 1975, Methods of Experimental Physics: Volume 12:
# Astrophysics, Part C: Radio Observations - Section 6.1.5.
class LSRK(BaseRADecFrame):
r"""A coordinate or frame in the Kinematic Local Standard of Rest (LSR).
This frame is defined as having a velocity of 20 km/s towards RA=270 Dec=30
(B1900) relative to the solar system Barycenter. This is defined in:
Gordon 1975, Methods of Experimental Physics: Volume 12:
Astrophysics, Part C: Radio Observations - Section 6.1.5.
This coordinate frame is axis-aligned and co-spatial with
`~astropy.coordinates.ICRS`, but has a velocity offset relative to the
solar system barycenter to remove the peculiar motion of the sun relative
to the LSRK.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# GORDON1975_V_BARY = 20*u.km/u.s
# GORDON1975_DIRECTION = FK4(ra=270*u.deg, dec=30*u.deg, equinox='B1900')
# V_OFFSET_LSRK = ((GORDON1975_V_BARY * GORDON1975_DIRECTION.transform_to(ICRS()).data)
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRK = r.CartesianDifferential(
[0.28999706839034606, -17.317264789717928, 10.00141199546947] * u.km / u.s
)
ICRS_LSRK_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=V_OFFSET_LSRK
)
LSRK_ICRS_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=-V_OFFSET_LSRK
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRK)
def icrs_to_lsrk(icrs_coord, lsr_frame):
return None, ICRS_LSRK_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRK, ICRS)
def lsrk_to_icrs(lsr_coord, icrs_frame):
return None, LSRK_ICRS_OFFSET
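# Illustrative sketch added by the editor (not part of the upstream module):
# a runnable variant of the commented-out recipe above that regenerates the
# hard-coded V_OFFSET_LSRK from the Gordon (1975) definition. It is kept out
# of the import path because, as noted above, computing it at import time
# carries a performance penalty. The helper name is hypothetical and purely
# illustrative.
def _regenerate_lsrk_offset():
    import astropy.units as u
    from astropy.coordinates import FK4, ICRS
    from astropy.coordinates import representation as r

    gordon1975_v_bary = 20 * u.km / u.s
    direction_icrs = FK4(
        ra=270 * u.deg, dec=30 * u.deg, equinox="B1900"
    ).transform_to(ICRS())
    # Scale the ICRS unit vector by the barycentric speed; the result should
    # match V_OFFSET_LSRK above to within round-off.
    return r.CartesianDifferential(
        gordon1975_v_bary * direction_icrs.cartesian.xyz
    )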
# ------------------------------------------------------------------------------
# The LSRD velocity frame, defined as a velocity of U=9 km/s, V=12 km/s,
# and W=7 km/s in Galactic coordinates or 16.552945 km/s
# towards l=53.13 b=25.02. This is defined in:
#
# Delhaye 1965, Solar Motion and Velocity Distribution of
# Common Stars.
class LSRD(BaseRADecFrame):
r"""A coordinate or frame in the Dynamical Local Standard of Rest (LSRD).
This frame is defined as a velocity of U=9 km/s, V=12 km/s,
and W=7 km/s in Galactic coordinates or 16.552945 km/s
towards l=53.13 b=25.02. This is defined in:
Delhaye 1965, Solar Motion and Velocity Distribution of
Common Stars.
This coordinate frame is axis-aligned and co-spatial with
`~astropy.coordinates.ICRS`, but has a velocity offset relative to the
solar system barycenter to remove the peculiar motion of the sun relative
to the LSRD.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# V_BARY_DELHAYE1965 = r.CartesianDifferential([9, 12, 7] * u.km/u.s)
# V_OFFSET_LSRD = (Galactic(V_BARY_DELHAYE1965.to_cartesian()).transform_to(ICRS()).data
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRD = r.CartesianDifferential(
[-0.6382306360182073, -14.585424483191094, 7.8011572411006815] * u.km / u.s
)
ICRS_LSRD_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=V_OFFSET_LSRD
)
LSRD_ICRS_OFFSET = r.CartesianRepresentation(
[0, 0, 0] * u.au, differentials=-V_OFFSET_LSRD
)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRD)
def icrs_to_lsrd(icrs_coord, lsr_frame):
return None, ICRS_LSRD_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRD, ICRS)
def lsrd_to_icrs(lsr_coord, icrs_frame):
return None, LSRD_ICRS_OFFSET
# ------------------------------------------------------------------------------
# Create loopback transformations
frame_transform_graph._add_merged_transform(LSR, ICRS, LSR)
frame_transform_graph._add_merged_transform(GalacticLSR, Galactic, GalacticLSR)
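# Illustrative sketch added by the editor (not part of the upstream module):
# the loopback transforms registered just above let two LSR realizations with
# different ``v_bary`` values be compared directly; the route goes through the
# ICRS (or Galactic) parent frame. The helper name and the alternative v_bary
# value below are hypothetical and purely illustrative.
def _example_lsr_loopback():
    import astropy.units as u
    from astropy.coordinates import ICRS, LSR
    from astropy.coordinates import representation as r

    star = ICRS(
        ra=10 * u.deg,
        dec=20 * u.deg,
        distance=100 * u.pc,
        pm_ra_cosdec=0 * u.mas / u.yr,
        pm_dec=0 * u.mas / u.yr,
        radial_velocity=0 * u.km / u.s,
    )
    other_v_bary = r.CartesianDifferential([9.0, 12.0, 7.0] * u.km / u.s)
    in_default_lsr = star.transform_to(LSR())
    # Re-express the same coordinate in an LSR with a different v_bary.
    in_other_lsr = in_default_lsr.transform_to(LSR(v_bary=other_v_bary))
    return in_default_lsr.radial_velocity, in_other_lsr.radial_velocity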
|
c10be5d9eee92eacd2018a640bd0dd14a4c4f95ef54f610717a4b0995974ed51 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import DynamicMatrixTransform
from .fk5 import FK5
from .icrs import ICRS
from .utils import EQUINOX_J2000
def _icrs_to_fk5_matrix():
"""
B-matrix from USNO circular 179. Used by the ICRS->FK5 transformation
functions.
"""
eta0 = -19.9 / 3600000.0
xi0 = 9.1 / 3600000.0
da0 = -22.9 / 3600000.0
return (
rotation_matrix(-eta0, "x")
@ rotation_matrix(xi0, "y")
@ rotation_matrix(da0, "z")
)
# define this here because it only needs to be computed once
_ICRS_TO_FK5_J2000_MAT = _icrs_to_fk5_matrix()
@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, FK5)
def icrs_to_fk5(icrscoord, fk5frame):
# ICRS is by design very close to J2000 equinox
pmat = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
return pmat @ _ICRS_TO_FK5_J2000_MAT
# can't be static because the equinox is needed
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, ICRS)
def fk5_to_icrs(fk5coord, icrsframe):
# ICRS is by design very close to J2000 equinox
pmat = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
return matrix_transpose(_ICRS_TO_FK5_J2000_MAT) @ pmat
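# Illustrative sketch added by the editor (not part of the upstream module):
# ICRS and FK5(J2000) differ only by the small constant frame bias encoded in
# _ICRS_TO_FK5_J2000_MAT, while other FK5 equinoxes add a precession rotation
# on top of it. The helper name is hypothetical and purely illustrative.
def _example_icrs_fk5_bias():
    import astropy.units as u
    from astropy.coordinates import FK5, ICRS

    c_icrs = ICRS(ra=10 * u.deg, dec=20 * u.deg)
    c_j2000 = c_icrs.transform_to(FK5(equinox="J2000"))  # frame bias only
    c_j1975 = c_icrs.transform_to(FK5(equinox="J1975"))  # bias + precession
    return c_j2000, c_j1975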
|
12ed491ba8c97a7039f39ab86e665cab5a729939304afaa90ef18555babd0653 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from copy import deepcopy
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord, galactocentric_frame_defaults
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import (
Attribute,
CoordinateAttribute,
DifferentialAttribute,
EarthLocationAttribute,
QuantityAttribute,
TimeAttribute,
)
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.builtin_frames import (
FK4,
FK5,
GCRS,
HCRS,
ICRS,
ITRS,
AltAz,
Galactic,
Galactocentric,
HADec,
)
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES,
CartesianDifferential,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from .test_representation import unitphysics # this fixture is used below # noqa: F401
def setup_function(func):
"""Copy original 'REPRESENTATIONCLASSES' as attribute in function."""
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
"""Reset REPRESENTATION_CLASSES to original value."""
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
def test_frame_attribute_descriptor():
"""Unit tests of the Attribute descriptor."""
class TestAttributes:
attr_none = Attribute()
attr_2 = Attribute(default=2)
attr_3_attr2 = Attribute(default=3, secondary_attribute="attr_2")
attr_none_attr2 = Attribute(default=None, secondary_attribute="attr_2")
attr_none_nonexist = Attribute(default=None, secondary_attribute="nonexist")
t = TestAttributes()
# Defaults
assert t.attr_none is None
assert t.attr_2 == 2
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
assert t.attr_none_nonexist is None # No default and non-existent secondary attr
# Setting values via '_'-prefixed internal vars
    # (as would normally be done in __init__)
t._attr_none = 10
assert t.attr_none == 10
t._attr_2 = 20
assert t.attr_2 == 20
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
t._attr_none_attr2 = 40
assert t.attr_none_attr2 == 40
# Make sure setting values via public attribute fails
with pytest.raises(AttributeError) as err:
t.attr_none = 5
assert "Cannot set frame attribute" in str(err.value)
def test_frame_subclass_attribute_descriptor():
"""Unit test of the attribute descriptors in subclasses."""
_EQUINOX_B1980 = Time("B1980", scale="tai")
class MyFK4(FK4):
# equinox inherited from FK4, obstime overridden, and newattr is new
obstime = TimeAttribute(default=_EQUINOX_B1980)
newattr = Attribute(default="newattr")
mfk4 = MyFK4()
assert mfk4.equinox.value == "B1950.000"
assert mfk4.obstime.value == "B1980.000"
assert mfk4.newattr == "newattr"
with pytest.warns(AstropyDeprecationWarning):
assert set(mfk4.get_frame_attr_names()) == {"equinox", "obstime", "newattr"}
mfk4 = MyFK4(equinox="J1980.0", obstime="J1990.0", newattr="world")
assert mfk4.equinox.value == "J1980.000"
assert mfk4.obstime.value == "J1990.000"
assert mfk4.newattr == "world"
def test_frame_multiple_inheritance_attribute_descriptor():
"""
Ensure that all attributes are accumulated in case of inheritance from
multiple BaseCoordinateFrames. See
https://github.com/astropy/astropy/pull/11099#issuecomment-735829157
"""
class Frame1(BaseCoordinateFrame):
attr1 = Attribute()
class Frame2(BaseCoordinateFrame):
attr2 = Attribute()
class Frame3(Frame1, Frame2):
pass
assert len(Frame3.frame_attributes) == 2
assert "attr1" in Frame3.frame_attributes
assert "attr2" in Frame3.frame_attributes
# In case the same attribute exists in both frames, the one from the
# left-most class in the MRO should take precedence
class Frame4(BaseCoordinateFrame):
attr1 = Attribute()
attr2 = Attribute()
class Frame5(Frame1, Frame4):
pass
assert Frame5.frame_attributes["attr1"] is Frame1.frame_attributes["attr1"]
assert Frame5.frame_attributes["attr2"] is Frame4.frame_attributes["attr2"]
def test_differentialattribute():
# Test logic of passing input through to allowed class
vel = [1, 2, 3] * u.km / u.s
dif = r.CartesianDifferential(vel)
class TestFrame(BaseCoordinateFrame):
attrtest = DifferentialAttribute(
default=dif, allowed_classes=[r.CartesianDifferential]
)
frame1 = TestFrame()
frame2 = TestFrame(attrtest=dif)
frame3 = TestFrame(attrtest=vel)
assert np.all(frame1.attrtest.d_xyz == frame2.attrtest.d_xyz)
assert np.all(frame1.attrtest.d_xyz == frame3.attrtest.d_xyz)
# This shouldn't work if there is more than one allowed class:
class TestFrame2(BaseCoordinateFrame):
attrtest = DifferentialAttribute(
default=dif,
allowed_classes=[r.CartesianDifferential, r.CylindricalDifferential],
)
frame1 = TestFrame2()
frame2 = TestFrame2(attrtest=dif)
with pytest.raises(TypeError):
TestFrame2(attrtest=vel)
def test_create_data_frames():
# from repr
i1 = ICRS(r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc))
i2 = ICRS(r.UnitSphericalRepresentation(lon=1 * u.deg, lat=2 * u.deg))
# from preferred name
i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
i4 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
assert i1.data.lat == i3.data.lat
assert i1.data.lon == i3.data.lon
assert i1.data.distance == i3.data.distance
assert i2.data.lat == i4.data.lat
assert i2.data.lon == i4.data.lon
# now make sure the preferred names work as properties
assert_allclose(i1.ra, i3.ra)
assert_allclose(i2.ra, i4.ra)
assert_allclose(i1.distance, i3.distance)
with pytest.raises(AttributeError):
i1.ra = [11.0] * u.deg
def test_create_ordered_data():
TOL = 1e-10 * u.deg
i = ICRS(1 * u.deg, 2 * u.deg)
assert (i.ra - 1 * u.deg) < TOL
assert (i.dec - 2 * u.deg) < TOL
g = Galactic(1 * u.deg, 2 * u.deg)
assert (g.l - 1 * u.deg) < TOL
assert (g.b - 2 * u.deg) < TOL
a = AltAz(1 * u.deg, 2 * u.deg)
assert (a.az - 1 * u.deg) < TOL
assert (a.alt - 2 * u.deg) < TOL
with pytest.raises(TypeError):
ICRS(1 * u.deg, 2 * u.deg, 1 * u.deg, 2 * u.deg)
with pytest.raises(TypeError):
sph = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
ICRS(sph, 1 * u.deg, 2 * u.deg)
def test_create_nodata_frames():
i = ICRS()
assert len(i.frame_attributes) == 0
f5 = FK5()
assert f5.equinox == FK5.get_frame_attr_defaults()["equinox"]
f4 = FK4()
assert f4.equinox == FK4.get_frame_attr_defaults()["equinox"]
# obstime is special because it's a property that uses equinox if obstime is not set
assert f4.obstime in (
FK4.get_frame_attr_defaults()["obstime"],
FK4.get_frame_attr_defaults()["equinox"],
)
def test_no_data_nonscalar_frames():
a1 = AltAz(
obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
temperature=np.ones((3, 1)) * u.deg_C,
)
assert a1.obstime.shape == (3, 10)
assert a1.temperature.shape == (3, 10)
assert a1.shape == (3, 10)
with pytest.raises(ValueError) as exc:
AltAz(
obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
temperature=np.ones((3,)) * u.deg_C,
)
assert "inconsistent shapes" in str(exc.value)
def test_frame_repr():
i = ICRS()
assert repr(i) == "<ICRS Frame>"
f5 = FK5()
assert repr(f5).startswith("<FK5 Frame (equinox=")
i2 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
assert repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n (1., 2.)>"
assert (
repr(i3)
== "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n (1., 2., 3.)>"
)
# try with arrays
i2 = ICRS(ra=[1.1, 2.1] * u.deg, dec=[2.1, 3.1] * u.deg)
i3 = ICRS(
ra=[1.1, 2.1] * u.deg, dec=[-15.6, 17.1] * u.deg, distance=[11.0, 21.0] * u.kpc
)
assert (
repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n [(1.1, 2.1), (2.1, 3.1)]>"
)
assert (
repr(i3) == "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n"
" [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>"
)
def test_frame_repr_vels():
i = ICRS(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=1 * u.marcsec / u.yr,
pm_dec=2 * u.marcsec / u.yr,
)
# unit comes out as mas/yr because of the preferred units defined in the
# frame RepresentationMapping
assert (
repr(i) == "<ICRS Coordinate: (ra, dec) in deg\n"
" (1., 2.)\n"
" (pm_ra_cosdec, pm_dec) in mas / yr\n"
" (1., 2.)>"
)
def test_converting_units():
    # this is a regular expression that, with split (see below), removes
    # what's after the decimal point to fix rounding problems
rexrepr = re.compile(r"(.*?=\d\.).*?( .*?=\d\.).*?( .*)")
# Use values that aren't subject to rounding down to X.9999...
i2 = ICRS(ra=2.0 * u.deg, dec=2.0 * u.deg)
i2_many = ICRS(ra=[2.0, 4.0] * u.deg, dec=[2.0, -8.1] * u.deg)
# converting from FK5 to ICRS and back changes the *internal* representation,
# but it should still come out in the preferred form
i4 = i2.transform_to(FK5()).transform_to(ICRS())
i4_many = i2_many.transform_to(FK5()).transform_to(ICRS())
ri2 = "".join(rexrepr.split(repr(i2)))
ri4 = "".join(rexrepr.split(repr(i4)))
assert ri2 == ri4
assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed
ri2_many = "".join(rexrepr.split(repr(i2_many)))
ri4_many = "".join(rexrepr.split(repr(i4_many)))
assert ri2_many == ri4_many
assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed
# but that *shouldn't* hold if we turn off units for the representation
class FakeICRS(ICRS):
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ra", u.hourangle),
RepresentationMapping("lat", "dec", None),
RepresentationMapping("distance", "distance"),
] # should fall back to default of None unit
}
fi = FakeICRS(i4.data)
ri2 = "".join(rexrepr.split(repr(i2)))
rfi = "".join(rexrepr.split(repr(fi)))
rfi = re.sub("FakeICRS", "ICRS", rfi) # Force frame name to match
assert ri2 != rfi
# the attributes should also get the right units
assert i2.dec.unit == i4.dec.unit
# unless no/explicitly given units
assert i2.dec.unit != fi.dec.unit
assert i2.ra.unit != fi.ra.unit
assert fi.ra.unit == u.hourangle
def test_representation_info():
class NewICRS1(ICRS):
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "rara", u.hourangle),
RepresentationMapping("lat", "decdec", u.degree),
RepresentationMapping("distance", "distance", u.kpc),
]
}
i1 = NewICRS1(
rara=10 * u.degree,
decdec=-12 * u.deg,
distance=1000 * u.pc,
pm_rara_cosdecdec=100 * u.mas / u.yr,
pm_decdec=17 * u.mas / u.yr,
radial_velocity=10 * u.km / u.s,
)
assert allclose(i1.rara, 10 * u.deg)
assert i1.rara.unit == u.hourangle
assert allclose(i1.decdec, -12 * u.deg)
assert allclose(i1.distance, 1000 * u.pc)
assert i1.distance.unit == u.kpc
assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)
# this should auto-set the names of UnitSpherical:
i1.set_representation_cls(
r.UnitSphericalRepresentation, s=r.UnitSphericalCosLatDifferential
)
assert allclose(i1.rara, 10 * u.deg)
assert allclose(i1.decdec, -12 * u.deg)
assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)
# For backwards compatibility, we also support the string name in the
# representation info dictionary:
class NewICRS2(ICRS):
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ang1", u.hourangle),
RepresentationMapping("lat", "ang2", u.degree),
RepresentationMapping("distance", "howfar", u.kpc),
]
}
i2 = NewICRS2(ang1=10 * u.degree, ang2=-12 * u.deg, howfar=1000 * u.pc)
assert allclose(i2.ang1, 10 * u.deg)
assert i2.ang1.unit == u.hourangle
assert allclose(i2.ang2, -12 * u.deg)
assert allclose(i2.howfar, 1000 * u.pc)
assert i2.howfar.unit == u.kpc
# Test that the differential kwargs get overridden
class NewICRS3(ICRS):
frame_specific_representation_info = {
r.SphericalCosLatDifferential: [
RepresentationMapping("d_lon_coslat", "pm_ang1", u.hourangle / u.year),
RepresentationMapping("d_lat", "pm_ang2"),
RepresentationMapping("d_distance", "vlos", u.kpc / u.Myr),
]
}
i3 = NewICRS3(
lon=10 * u.degree,
lat=-12 * u.deg,
distance=1000 * u.pc,
pm_ang1=1 * u.mas / u.yr,
pm_ang2=2 * u.mas / u.yr,
vlos=100 * u.km / u.s,
)
assert allclose(i3.pm_ang1, 1 * u.mas / u.yr)
assert i3.pm_ang1.unit == u.hourangle / u.year
assert allclose(i3.pm_ang2, 2 * u.mas / u.yr)
assert allclose(i3.vlos, 100 * u.km / u.s)
assert i3.vlos.unit == u.kpc / u.Myr
def test_realizing():
rep = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
i = ICRS()
i2 = i.realize_frame(rep)
assert not i.has_data
assert i2.has_data
f = FK5(equinox=Time("J2001"))
f2 = f.realize_frame(rep)
assert not f.has_data
assert f2.has_data
assert f2.equinox == f.equinox
assert f2.equinox != FK5.get_frame_attr_defaults()["equinox"]
# Check that a nicer error message is returned:
with pytest.raises(
TypeError, match="Class passed as data instead of a representation"
):
f.realize_frame(f.representation_type)
def test_replicating():
i = ICRS(ra=[1] * u.deg, dec=[2] * u.deg)
icopy = i.replicate(copy=True)
irepl = i.replicate(copy=False)
i.data._lat[:] = 0 * u.deg
assert np.all(i.data.lat == irepl.data.lat)
assert np.all(i.data.lat != icopy.data.lat)
iclone = i.replicate_without_data()
assert i.has_data
assert not iclone.has_data
aa = AltAz(alt=1 * u.deg, az=2 * u.deg, obstime=Time("J2000"))
aaclone = aa.replicate_without_data(obstime=Time("J2001"))
assert not aaclone.has_data
assert aa.obstime != aaclone.obstime
assert aa.pressure == aaclone.pressure
assert aa.obswl == aaclone.obswl
def test_getitem():
rep = r.SphericalRepresentation(
[1, 2, 3] * u.deg, [4, 5, 6] * u.deg, [7, 8, 9] * u.kpc
)
i = ICRS(rep)
assert len(i.ra) == 3
iidx = i[1:]
assert len(iidx.ra) == 2
iidx2 = i[0]
assert iidx2.ra.isscalar
def test_transform():
"""
    This test just makes sure the transform architecture works, but does *not*
    actually test that all the built-in transforms themselves are accurate.
"""
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ == r.UnitSphericalRepresentation
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[5, 6] * u.kpc)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ != r.UnitSphericalRepresentation
f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
f4 = f.transform_to(FK4())
f4_2 = f.transform_to(FK4(equinox=f.equinox))
# make sure attributes are copied over correctly
assert f4.equinox == FK4().equinox
assert f4_2.equinox == f.equinox
# make sure self-transforms also work
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
i2 = i.transform_to(ICRS())
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
f2 = f.transform_to(FK5()) # default equinox, so should be *different*
assert f2.equinox == FK5().equinox
with pytest.raises(AssertionError):
assert_allclose(f.ra, f2.ra)
with pytest.raises(AssertionError):
assert_allclose(f.dec, f2.dec)
# finally, check Galactic round-tripping
i1 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
i2 = i1.transform_to(Galactic()).transform_to(ICRS())
assert_allclose(i1.ra, i2.ra)
assert_allclose(i1.dec, i2.dec)
def test_transform_to_nonscalar_nodata_frame():
# https://github.com/astropy/astropy/pull/5254#issuecomment-241592353
times = Time("2016-08-23") + np.linspace(0, 10, 12) * u.day
coo1 = ICRS(
ra=[[0.0], [10.0], [20.0]] * u.deg, dec=[[-30.0], [30.0], [60.0]] * u.deg
)
coo2 = coo1.transform_to(FK5(equinox=times))
assert coo2.shape == (3, 12)
def test_setitem_no_velocity():
"""Test different flavors of item setting for a Frame without a velocity."""
obstime = "B1955"
sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)
sc1 = sc0.copy()
sc1_repr = repr(sc1)
assert "representation" in sc1.cache
sc1[1] = sc2[0]
assert sc1.cache == {}
assert repr(sc2) != sc1_repr
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == sc2.obstime
assert sc1.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
# Works for array-valued obstime so long as they are considered equivalent
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, obstime])
sc1[0] = sc2[0]
# Multidimensional coordinates
sc1 = FK4([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc2 = FK4([[10, 20], [30, 40]] * u.deg, [[50, 60], [70, 80]] * u.deg)
sc1[0] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [[10, 20], [3, 4]])
assert np.allclose(sc1.dec.to_value(u.deg), [[50, 60], [7, 8]])
def test_setitem_velocities():
"""Test different flavors of item setting for a Frame with a velocity."""
sc0 = FK4(
[1, 2] * u.deg,
[3, 4] * u.deg,
radial_velocity=[1, 2] * u.km / u.s,
obstime="B1950",
)
sc2 = FK4(
[10, 20] * u.deg,
[30, 40] * u.deg,
radial_velocity=[10, 20] * u.km / u.s,
obstime="B1950",
)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == sc2.obstime
assert sc1.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
obstime = "B1950"
sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)
sc1 = Galactic(sc0.ra, sc0.dec)
with pytest.raises(
TypeError, match="can only set from object of same class: Galactic vs. FK4"
):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime="B2001")
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra[0], sc0.dec[0], obstime=obstime)
with pytest.raises(
TypeError, match="scalar 'FK4' frame object does not support item assignment"
):
sc1[0] = sc2[0]
sc1 = FK4(obstime=obstime)
with pytest.raises(ValueError, match="cannot set frame which has no data"):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, "B1980"])
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
# Wrong shape
sc1 = FK4([sc0.ra], [sc0.dec], obstime=[obstime, "B1980"])
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
def test_sep():
i1 = ICRS(ra=0 * u.deg, dec=1 * u.deg)
i2 = ICRS(ra=0 * u.deg, dec=2 * u.deg)
sep = i1.separation(i2)
assert_allclose(sep.deg, 1.0)
i3 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[5, 6] * u.kpc)
i4 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[4, 5] * u.kpc)
sep3d = i3.separation_3d(i4)
assert_allclose(sep3d.to(u.kpc), np.array([1, 1]) * u.kpc)
# check that it works even with velocities
i5 = ICRS(
ra=[1, 2] * u.deg,
dec=[3, 4] * u.deg,
distance=[5, 6] * u.kpc,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
radial_velocity=[5, 6] * u.km / u.s,
)
i6 = ICRS(
ra=[1, 2] * u.deg,
dec=[3, 4] * u.deg,
distance=[7, 8] * u.kpc,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
radial_velocity=[5, 6] * u.km / u.s,
)
sep3d = i5.separation_3d(i6)
assert_allclose(sep3d.to(u.kpc), np.array([2, 2]) * u.kpc)
# 3d separations of dimensionless distances should still work
i7 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.one)
i8 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=4 * u.one)
sep3d = i7.separation_3d(i8)
assert_allclose(sep3d, 1 * u.one)
# but should fail with non-dimensionless
with pytest.raises(ValueError):
i7.separation_3d(i3)
def test_time_inputs():
"""
Test validation and conversion of inputs for equinox and obstime attributes.
"""
c = FK4(1 * u.deg, 2 * u.deg, equinox="J2001.5", obstime="2000-01-01 12:00:00")
assert c.equinox == Time("J2001.5")
assert c.obstime == Time("2000-01-01 12:00:00")
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5)
assert "Invalid time input" in str(err.value)
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, obstime="hello")
assert "Invalid time input" in str(err.value)
# A vector time should work if the shapes match, but we don't automatically
# broadcast the basic data (just like time).
FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=["J2000", "J2001"])
with pytest.raises(ValueError) as err:
FK4(1 * u.deg, 2 * u.deg, obstime=["J2000", "J2001"])
assert "shape" in str(err.value)
def test_is_frame_attr_default():
"""
Check that the `is_frame_attr_default` machinery works as expected
"""
c1 = FK5(ra=1 * u.deg, dec=1 * u.deg)
c2 = FK5(
ra=1 * u.deg, dec=1 * u.deg, equinox=FK5.get_frame_attr_defaults()["equinox"]
)
c3 = FK5(ra=1 * u.deg, dec=1 * u.deg, equinox=Time("J2001.5"))
assert c1.equinox == c2.equinox
assert c1.equinox != c3.equinox
assert c1.is_frame_attr_default("equinox")
assert not c2.is_frame_attr_default("equinox")
assert not c3.is_frame_attr_default("equinox")
c4 = c1.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
c5 = c2.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
assert c4.is_frame_attr_default("equinox")
assert not c5.is_frame_attr_default("equinox")
def test_altaz_attributes():
aa = AltAz(1 * u.deg, 2 * u.deg)
assert aa.obstime is None
assert aa.location is None
aa2 = AltAz(1 * u.deg, 2 * u.deg, obstime="J2000")
assert aa2.obstime == Time("J2000")
aa3 = AltAz(
1 * u.deg, 2 * u.deg, location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
)
assert isinstance(aa3.location, EarthLocation)
def test_hadec_attributes():
hd = HADec(1 * u.hourangle, 2 * u.deg)
assert hd.ha == 1.0 * u.hourangle
assert hd.dec == 2 * u.deg
assert hd.obstime is None
assert hd.location is None
hd2 = HADec(
23 * u.hourangle,
-2 * u.deg,
obstime="J2000",
location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m),
)
assert_allclose(hd2.ha, -1 * u.hourangle)
assert hd2.dec == -2 * u.deg
assert hd2.obstime == Time("J2000")
assert isinstance(hd2.location, EarthLocation)
sr = hd2.represent_as(r.SphericalRepresentation)
assert_allclose(sr.lon, -1 * u.hourangle)
def test_itrs_earth_location():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
sat = EarthLocation(
lat=-24.6609379 * u.deg, lon=160.34199789 * u.deg, height=420.17927591 * u.km
)
itrs_geo = sat.get_itrs()
eloc = itrs_geo.earth_location
assert_allclose(sat.lon, eloc.lon)
assert_allclose(sat.lat, eloc.lat)
assert_allclose(sat.height, eloc.height)
topo_itrs_repr = itrs_geo.cartesian - loc.get_itrs().cartesian
itrs_topo = ITRS(topo_itrs_repr, location=loc)
eloc = itrs_topo.earth_location
assert_allclose(sat.lon, eloc.lon)
assert_allclose(sat.lat, eloc.lat)
assert_allclose(sat.height, eloc.height)
obstime = Time("J2010") # Anything different from default
topo_itrs_repr2 = sat.get_itrs(obstime).cartesian - loc.get_itrs(obstime).cartesian
itrs_topo2 = ITRS(topo_itrs_repr2, location=loc, obstime=obstime)
eloc2 = itrs_topo2.earth_location
assert_allclose(sat.lon, eloc2.lon)
assert_allclose(sat.lat, eloc2.lat)
assert_allclose(sat.height, eloc2.height)
def test_representation():
"""
Test the getter and setter properties for `representation`
"""
# Create the frame object.
icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
data = icrs.data
# Create some representation objects.
icrs_cart = icrs.cartesian
icrs_spher = icrs.spherical
icrs_cyl = icrs.cylindrical
# Testing when `_representation` set to `CartesianRepresentation`.
icrs.representation_type = r.CartesianRepresentation
assert icrs.representation_type == r.CartesianRepresentation
assert icrs_cart.x == icrs.x
assert icrs_cart.y == icrs.y
assert icrs_cart.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CartesianRepresentation must not have spherical attributes.
for attr in ("ra", "dec", "distance"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
# Testing when `_representation` set to `CylindricalRepresentation`.
icrs.representation_type = r.CylindricalRepresentation
assert icrs.representation_type == r.CylindricalRepresentation
assert icrs.data == data
# Testing setter input using text argument for spherical.
icrs.representation_type = "spherical"
assert icrs.representation_type is r.SphericalRepresentation
assert icrs_spher.lat == icrs.dec
assert icrs_spher.lon == icrs.ra
assert icrs_spher.distance == icrs.distance
assert icrs.data == data
# Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes.
for attr in ("x", "y", "z"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
# Testing setter input using text argument for cylindrical.
icrs.representation_type = "cylindrical"
assert icrs.representation_type is r.CylindricalRepresentation
assert icrs_cyl.rho == icrs.rho
assert icrs_cyl.phi == icrs.phi
assert icrs_cyl.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CylindricalRepresentation must not have spherical attributes.
for attr in ("ra", "dec", "distance"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
with pytest.raises(ValueError) as err:
icrs.representation_type = "WRONG"
assert "but must be a BaseRepresentation class" in str(err.value)
with pytest.raises(ValueError) as err:
icrs.representation_type = ICRS
assert "but must be a BaseRepresentation class" in str(err.value)
def test_represent_as():
icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
cart1 = icrs.represent_as("cartesian")
cart2 = icrs.represent_as(r.CartesianRepresentation)
    assert cart1.x == cart2.x
    assert cart1.y == cart2.y
    assert cart1.z == cart2.z
# now try with velocities
icrs = ICRS(
ra=0 * u.deg,
dec=0 * u.deg,
distance=10 * u.kpc,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=1 * u.km / u.s,
)
# single string
rep2 = icrs.represent_as("cylindrical")
assert isinstance(rep2, r.CylindricalRepresentation)
assert isinstance(rep2.differentials["s"], r.CylindricalDifferential)
    # single class with positional in_frame_units; verify a warning is raised
with pytest.warns(AstropyWarning, match="argument position") as w:
icrs.represent_as(r.CylindricalRepresentation, False)
assert len(w) == 1
# TODO: this should probably fail in the future once we figure out a better
# workaround for dealing with UnitSphericalRepresentation's with
# RadialDifferential's
# two classes
# rep2 = icrs.represent_as(r.CartesianRepresentation,
# r.SphericalCosLatDifferential)
# assert isinstance(rep2, r.CartesianRepresentation)
# assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
with pytest.raises(ValueError):
icrs.represent_as("odaigahara")
def test_shorthand_representations():
rep = r.CartesianRepresentation([1, 2, 3] * u.pc)
dif = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
rep = rep.with_differentials(dif)
icrs = ICRS(rep)
cyl = icrs.cylindrical
assert isinstance(cyl, r.CylindricalRepresentation)
assert isinstance(cyl.differentials["s"], r.CylindricalDifferential)
sph = icrs.spherical
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials["s"], r.SphericalDifferential)
sph = icrs.sphericalcoslat
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials["s"], r.SphericalCosLatDifferential)
def test_equal():
obstime = "B1955"
sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = FK4([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
sc2 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
assert (FK4() == ICRS()) is False
assert (FK4() == FK4(obstime="J1999")) is False
def test_equal_exceptions():
# Shape mismatch
sc1 = FK4([1, 2, 3] * u.deg, [3, 4, 5] * u.deg)
with pytest.raises(ValueError, match="cannot compare: shape mismatch"):
sc1 == sc1[:2]
# Different representation_type
sc1 = FK4(1, 2, 3, representation_type="cartesian")
sc2 = FK4(1 * u.deg, 2 * u.deg, 2, representation_type="spherical")
with pytest.raises(
TypeError,
match=(
"cannot compare: objects must have same "
"class: CartesianRepresentation vs. SphericalRepresentation"
),
):
sc1 == sc2
# Different differential type
sc1 = FK4(1 * u.deg, 2 * u.deg, radial_velocity=1 * u.km / u.s)
sc2 = FK4(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=1 * u.mas / u.yr
)
with pytest.raises(
TypeError,
match=(
"cannot compare: objects must have same "
"class: RadialDifferential vs. UnitSphericalCosLatDifferential"
),
):
sc1 == sc2
# Different frame attribute
sc1 = FK5(1 * u.deg, 2 * u.deg)
sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J1999")
with pytest.raises(
TypeError,
match=r"cannot compare: objects must have equivalent "
r"frames: <FK5 Frame \(equinox=J2000.000\)> "
r"vs. <FK5 Frame \(equinox=J1999.000\)>",
):
sc1 == sc2
# Different frame
sc1 = FK4(1 * u.deg, 2 * u.deg)
sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
with pytest.raises(
TypeError,
match="cannot compare: objects must have equivalent "
r"frames: <FK4 Frame \(equinox=B1950.000, obstime=B1950.000\)> "
r"vs. <FK5 Frame \(equinox=J2000.000\)>",
):
sc1 == sc2
sc1 = FK4(1 * u.deg, 2 * u.deg)
sc2 = FK4()
with pytest.raises(
ValueError, match="cannot compare: one frame has data and the other does not"
):
sc1 == sc2
with pytest.raises(
ValueError, match="cannot compare: one frame has data and the other does not"
):
sc2 == sc1
def test_dynamic_attrs():
c = ICRS(1 * u.deg, 2 * u.deg)
assert "ra" in dir(c)
assert "dec" in dir(c)
with pytest.raises(AttributeError) as err:
c.blahblah
assert "object has no attribute 'blahblah'" in str(err.value)
with pytest.raises(AttributeError) as err:
c.ra = 1
assert "Cannot set any frame attribute" in str(err.value)
c.blahblah = 1
assert c.blahblah == 1
def test_nodata_error():
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.data
assert "does not have associated data" in str(excinfo.value)
def test_len0_data():
i = ICRS([] * u.deg, [] * u.deg)
assert i.has_data
repr(i)
def test_quantity_attributes():
# make sure we can create a GCRS frame with valid inputs
GCRS(obstime="J2002", obsgeoloc=[1, 2, 3] * u.km, obsgeovel=[4, 5, 6] * u.km / u.s)
    # make sure it fails for invalid locs or vels
with pytest.raises(TypeError):
GCRS(obsgeoloc=[1, 2, 3]) # no unit
with pytest.raises(u.UnitsError):
GCRS(obsgeoloc=[1, 2, 3] * u.km / u.s) # incorrect unit
with pytest.raises(ValueError):
GCRS(obsgeoloc=[1, 3] * u.km) # incorrect shape
def test_quantity_attribute_default():
# The default default (yes) is None:
class MyCoord(BaseCoordinateFrame):
someval = QuantityAttribute(unit=u.deg)
frame = MyCoord()
assert frame.someval is None
frame = MyCoord(someval=15 * u.deg)
assert u.isclose(frame.someval, 15 * u.deg)
# This should work if we don't explicitly pass in a unit, but we pass in a
# default value with a unit
class MyCoord2(BaseCoordinateFrame):
someval = QuantityAttribute(15 * u.deg)
frame = MyCoord2()
assert u.isclose(frame.someval, 15 * u.deg)
# Since here no shape was given, we can set to any shape we like.
frame = MyCoord2(someval=np.ones(3) * u.deg)
assert frame.someval.shape == (3,)
assert np.all(frame.someval == 1 * u.deg)
# We should also be able to insist on a given shape.
class MyCoord3(BaseCoordinateFrame):
someval = QuantityAttribute(unit=u.arcsec, shape=(3,))
frame = MyCoord3(someval=np.ones(3) * u.deg)
assert frame.someval.shape == (3,)
assert frame.someval.unit == u.arcsec
assert u.allclose(frame.someval.value, 3600.0)
# The wrong shape raises.
with pytest.raises(ValueError, match="shape"):
MyCoord3(someval=1.0 * u.deg)
# As does the wrong unit.
with pytest.raises(u.UnitsError):
MyCoord3(someval=np.ones(3) * u.m)
# We are allowed a short-cut for zero.
frame0 = MyCoord3(someval=0)
assert frame0.someval.shape == (3,)
assert frame0.someval.unit == u.arcsec
assert np.all(frame0.someval.value == 0.0)
# But not if it has the wrong shape.
with pytest.raises(ValueError, match="shape"):
MyCoord3(someval=np.zeros(2))
# This should fail, if we don't pass in a default or a unit
with pytest.raises(ValueError):
class MyCoord(BaseCoordinateFrame):
someval = QuantityAttribute()
def test_eloc_attributes():
el = EarthLocation(lon=12.3 * u.deg, lat=45.6 * u.deg, height=1 * u.km)
it = ITRS(
r.SphericalRepresentation(lon=12.3 * u.deg, lat=45.6 * u.deg, distance=1 * u.km)
)
gc = GCRS(ra=12.3 * u.deg, dec=45.6 * u.deg, distance=6375 * u.km)
el1 = AltAz(location=el).location
assert isinstance(el1, EarthLocation)
    # these should match *exactly* because the EarthLocation is passed through unchanged
assert el1.lat == el.lat
assert el1.lon == el.lon
assert el1.height == el.height
el2 = AltAz(location=it).location
assert isinstance(el2, EarthLocation)
# these should *not* match because giving something in Spherical ITRS is
# *not* the same as giving it as an EarthLocation: EarthLocation is on an
# elliptical geoid. So the longitude should match (because flattening is
# only along the z-axis), but latitude should not. Also, height is relative
# to the *surface* in EarthLocation, but the ITRS distance is relative to
# the center of the Earth
assert not allclose(el2.lat, it.spherical.lat)
assert allclose(el2.lon, it.spherical.lon)
assert el2.height < -6000 * u.km
el3 = AltAz(location=gc).location
# GCRS inputs implicitly get transformed to ITRS and then onto
# EarthLocation's elliptical geoid. So both lat and lon shouldn't match
assert isinstance(el3, EarthLocation)
assert not allclose(el3.lat, gc.dec)
assert not allclose(el3.lon, gc.ra)
assert np.abs(el3.height) < 500 * u.km
def test_equivalent_frames():
i = ICRS()
i2 = ICRS(1 * u.deg, 2 * u.deg)
assert i.is_equivalent_frame(i)
assert i.is_equivalent_frame(i2)
with pytest.raises(TypeError):
assert i.is_equivalent_frame(10)
with pytest.raises(TypeError):
assert i2.is_equivalent_frame(SkyCoord(i2))
f0 = FK5() # this J2000 is TT
f1 = FK5(equinox="J2000")
f2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
f3 = FK5(equinox="J2010")
f4 = FK4(equinox="J2010")
assert f1.is_equivalent_frame(f1)
assert not i.is_equivalent_frame(f1)
assert f0.is_equivalent_frame(f1)
assert f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f3.is_equivalent_frame(f4)
aa1 = AltAz()
aa2 = AltAz(obstime="J2010")
assert aa2.is_equivalent_frame(aa2)
assert not aa1.is_equivalent_frame(i)
assert not aa1.is_equivalent_frame(aa2)
def test_equivalent_frame_coordinateattribute():
class FrameWithCoordinateAttribute(BaseCoordinateFrame):
coord_attr = CoordinateAttribute(HCRS)
# These frames should not be considered equivalent
f0 = FrameWithCoordinateAttribute()
f1 = FrameWithCoordinateAttribute(
coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2000")
)
f2 = FrameWithCoordinateAttribute(
coord_attr=HCRS(3 * u.deg, 4 * u.deg, obstime="J2000")
)
f3 = FrameWithCoordinateAttribute(
coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2001")
)
assert not f0.is_equivalent_frame(f1)
assert not f1.is_equivalent_frame(f0)
assert not f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f2.is_equivalent_frame(f3)
# They each should still be equivalent to a deep copy of themselves
assert f0.is_equivalent_frame(deepcopy(f0))
assert f1.is_equivalent_frame(deepcopy(f1))
assert f2.is_equivalent_frame(deepcopy(f2))
assert f3.is_equivalent_frame(deepcopy(f3))
def test_equivalent_frame_locationattribute():
class FrameWithLocationAttribute(BaseCoordinateFrame):
loc_attr = EarthLocationAttribute()
# These frames should not be considered equivalent
f0 = FrameWithLocationAttribute()
location = EarthLocation(lat=-34, lon=19, height=300)
f1 = FrameWithLocationAttribute(loc_attr=location)
assert not f0.is_equivalent_frame(f1)
assert not f1.is_equivalent_frame(f0)
# They each should still be equivalent to a deep copy of themselves
assert f0.is_equivalent_frame(deepcopy(f0))
assert f1.is_equivalent_frame(deepcopy(f1))
def test_representation_subclass():
# Regression test for #3354
# Normally when instantiating a frame without a distance the frame will try
# and use UnitSphericalRepresentation internally instead of
# SphericalRepresentation.
frame = FK5(
representation_type=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg
)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation_type == r.SphericalRepresentation
# If using a SphericalRepresentation class this used to not work, so we
# test here that this is now fixed.
class NewSphericalRepresentation(r.SphericalRepresentation):
attr_classes = r.SphericalRepresentation.attr_classes
frame = FK5(
representation_type=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg
)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation_type == NewSphericalRepresentation
# A similar issue then happened in __repr__ with subclasses of
# SphericalRepresentation.
assert (
repr(frame)
== "<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n (32., 20.)>"
)
# A more subtle issue is when specifying a custom
# UnitSphericalRepresentation subclass for the data and
# SphericalRepresentation or a subclass for the representation.
class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
attr_classes = r.UnitSphericalRepresentation.attr_classes
def __repr__(self):
return "<NewUnitSphericalRepresentation: spam spam spam>"
frame = FK5(
NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
representation_type=NewSphericalRepresentation,
)
assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
c = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
c.representation_type = "cartesian"
assert c[0].representation_type is r.CartesianRepresentation
def test_component_error_useful():
"""
Check that a data-less frame gives useful error messages about not having
data when the attributes asked for are possible coordinate components
"""
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.ra
assert "does not have associated data" in str(excinfo.value)
with pytest.raises(AttributeError) as excinfo1:
i.foobar
with pytest.raises(AttributeError) as excinfo2:
i.lon # lon is *not* the component name despite being the underlying representation's name
assert "object has no attribute 'foobar'" in str(excinfo1.value)
assert "object has no attribute 'lon'" in str(excinfo2.value)
def test_cache_clear():
i = ICRS(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
assert len(i.cache["representation"]) == 2
i.cache.clear()
assert len(i.cache["representation"]) == 0
def test_inplace_array():
i = ICRS([[1, 2], [3, 4]] * u.deg, [[10, 20], [30, 40]] * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache["representation"]) == 2
# Modify the data
i.data.lon[:, 0] = [100, 200] * u.deg
# Clear the cache
i.cache.clear()
    # This will rebuild the (potentially cached) rep
assert_allclose(i.ra, [[100, 2], [200, 4]] * u.deg)
assert_allclose(i.dec, [[10, 20], [30, 40]] * u.deg)
def test_inplace_change():
i = ICRS(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache["representation"]) == 2
# Modify the data
i.data.lon[()] = 10 * u.deg
# Clear the cache
i.cache.clear()
    # This will rebuild the (potentially cached) rep
assert i.ra == 10 * u.deg
assert i.dec == 2 * u.deg
def test_representation_with_multiple_differentials():
dif1 = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
dif2 = r.CartesianDifferential([1, 2, 3] * u.km / u.s**2)
rep = r.CartesianRepresentation(
[1, 2, 3] * u.pc, differentials={"s": dif1, "s2": dif2}
)
    # check that an error is raised for a representation with multiple differentials
with pytest.raises(ValueError):
ICRS(rep)
def test_missing_component_error_names():
"""
This test checks that the component names are frame component names, not
representation or differential names, when referenced in an exception raised
when not passing in enough data. For example:
ICRS(ra=10*u.deg)
should state:
TypeError: __init__() missing 1 required positional argument: 'dec'
"""
with pytest.raises(TypeError) as e:
ICRS(ra=150 * u.deg)
assert "missing 1 required positional argument: 'dec'" in str(e.value)
with pytest.raises(TypeError) as e:
ICRS(
ra=150 * u.deg,
dec=-11 * u.deg,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
)
assert "pm_ra_cosdec" in str(e.value)
def test_non_spherical_representation_unit_creation(unitphysics): # noqa: F811
class PhysicsICRS(ICRS):
default_representation = r.PhysicsSphericalRepresentation
pic = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg, r=1 * u.kpc)
assert isinstance(pic.data, r.PhysicsSphericalRepresentation)
picu = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg)
assert isinstance(picu.data, unitphysics)
def test_attribute_repr():
class Spam:
def _astropy_repr_in_frame(self):
return "TEST REPR"
class TestFrame(BaseCoordinateFrame):
attrtest = Attribute(default=Spam())
assert "TEST REPR" in repr(TestFrame())
def test_component_names_repr():
# Frame class with new component names that includes a name swap
class NameChangeFrame(BaseCoordinateFrame):
default_representation = r.PhysicsSphericalRepresentation
frame_specific_representation_info = {
r.PhysicsSphericalRepresentation: [
RepresentationMapping("phi", "theta", u.deg),
RepresentationMapping("theta", "phi", u.arcsec),
RepresentationMapping("r", "JUSTONCE", u.AU),
]
}
frame = NameChangeFrame(0 * u.deg, 0 * u.arcsec, 0 * u.AU)
# Check for the new names in the Frame repr
assert "(theta, phi, JUSTONCE)" in repr(frame)
# Check that the letter "r" has not been replaced more than once in the Frame repr
assert repr(frame).count("JUSTONCE") == 1
def test_galactocentric_defaults():
with galactocentric_frame_defaults.set("pre-v4.0"):
galcen_pre40 = Galactocentric()
with galactocentric_frame_defaults.set("v4.0"):
galcen_40 = Galactocentric()
with galactocentric_frame_defaults.set("latest"):
galcen_latest = Galactocentric()
# parameters that changed
assert not u.allclose(galcen_pre40.galcen_distance, galcen_40.galcen_distance)
assert not u.allclose(galcen_pre40.z_sun, galcen_40.z_sun)
for k in galcen_40.frame_attributes:
if isinstance(getattr(galcen_40, k), BaseCoordinateFrame):
continue # skip coordinate comparison...
elif isinstance(getattr(galcen_40, k), CartesianDifferential):
assert u.allclose(
getattr(galcen_40, k).d_xyz, getattr(galcen_latest, k).d_xyz
)
else:
assert getattr(galcen_40, k) == getattr(galcen_latest, k)
# test validate Galactocentric
with galactocentric_frame_defaults.set("latest"):
params = galactocentric_frame_defaults.validate(galcen_latest)
references = galcen_latest.frame_attribute_references
state = dict(parameters=params, references=references)
assert galactocentric_frame_defaults.parameters == params
assert galactocentric_frame_defaults.references == references
assert galactocentric_frame_defaults._state == state
# Test not one of accepted parameter types
with pytest.raises(ValueError):
galactocentric_frame_defaults.validate(ValueError)
# test parameters property
assert (
galactocentric_frame_defaults.parameters
== galactocentric_frame_defaults.parameters
)
def test_galactocentric_references():
# references in the "scientific paper"-sense
with galactocentric_frame_defaults.set("pre-v4.0"):
galcen_pre40 = Galactocentric()
for k in galcen_pre40.frame_attributes:
if k == "roll": # no reference for this parameter
continue
assert k in galcen_pre40.frame_attribute_references
with galactocentric_frame_defaults.set("v4.0"):
galcen_40 = Galactocentric()
for k in galcen_40.frame_attributes:
if k == "roll": # no reference for this parameter
continue
assert k in galcen_40.frame_attribute_references
with galactocentric_frame_defaults.set("v4.0"):
galcen_custom = Galactocentric(z_sun=15 * u.pc)
for k in galcen_custom.frame_attributes:
if k == "roll": # no reference for this parameter
continue
if k == "z_sun":
assert k not in galcen_custom.frame_attribute_references
else:
assert k in galcen_custom.frame_attribute_references
def test_coordinateattribute_transformation():
class FrameWithCoordinateAttribute(BaseCoordinateFrame):
coord_attr = CoordinateAttribute(HCRS)
hcrs = HCRS(1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-02-03")
f1_frame = FrameWithCoordinateAttribute(coord_attr=hcrs)
f1_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(hcrs))
# The input is already HCRS, so the frame attribute should not change it
assert f1_frame.coord_attr == hcrs
# The output should not be different if a SkyCoord is provided
assert f1_skycoord.coord_attr == f1_frame.coord_attr
gcrs = GCRS(4 * u.deg, 5 * u.deg, 6 * u.AU, obstime="2004-05-06")
f2_frame = FrameWithCoordinateAttribute(coord_attr=gcrs)
f2_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(gcrs))
# The input needs to be converted from GCRS to HCRS
assert isinstance(f2_frame.coord_attr, HCRS)
# The `obstime` frame attribute should have been "merged" in a SkyCoord-style transformation
assert f2_frame.coord_attr.obstime == gcrs.obstime
# The output should not be different if a SkyCoord is provided
assert f2_skycoord.coord_attr == f2_frame.coord_attr
def test_realize_frame_accepts_kwargs():
c1 = ICRS(
x=1 * u.pc,
y=2 * u.pc,
z=3 * u.pc,
representation_type=r.CartesianRepresentation,
)
new_data = r.CartesianRepresentation(x=11 * u.pc, y=12 * u.pc, z=13 * u.pc)
c2 = c1.realize_frame(new_data, representation_type="cartesian")
c3 = c1.realize_frame(new_data, representation_type="cylindrical")
assert c2.representation_type == r.CartesianRepresentation
assert c3.representation_type == r.CylindricalRepresentation
def test_nameless_frame_subclass():
"""Note: this is a regression test for #11096"""
class Test:
pass
# Subclass from a frame class and a non-frame class.
# This subclassing is the test!
class NewFrame(ICRS, Test):
pass
def test_frame_coord_comparison():
"""Test that frame can be compared to a SkyCoord"""
frame = ICRS(0 * u.deg, 0 * u.deg)
coord = SkyCoord(frame)
other = SkyCoord(ICRS(0 * u.deg, 1 * u.deg))
assert frame == coord
assert frame != other
assert not (frame == other)
error_msg = "objects must have equivalent frames"
with pytest.raises(TypeError, match=error_msg):
frame == SkyCoord(AltAz("0d", "1d"))
coord = SkyCoord(ra=12 * u.hourangle, dec=5 * u.deg, frame=FK5(equinox="J1950"))
frame = FK5(ra=12 * u.hourangle, dec=5 * u.deg, equinox="J2000")
with pytest.raises(TypeError, match=error_msg):
coord == frame
frame = ICRS()
coord = SkyCoord(0 * u.deg, 0 * u.deg, frame=frame)
error_msg = "Can only compare SkyCoord to Frame with data"
with pytest.raises(ValueError, match=error_msg):
frame == coord
|
c241dd83c2fd9329c67d5e0ff46bd4ce3281bcb56d596789571956e6e1dee41b | """Test helper functions for coordinates."""
import numpy as np
def skycoord_equal(sc1, sc2):
"""SkyCoord equality useful for testing."""
if not sc1.is_equivalent_frame(sc2):
return False
if sc1.representation_type is not sc2.representation_type:
return False
if sc1.shape != sc2.shape:
return False # Maybe raise ValueError corresponding to future numpy behavior
eq = np.ones(shape=sc1.shape, dtype=bool)
for comp in sc1.data.components:
eq &= getattr(sc1.data, comp) == getattr(sc2.data, comp)
return np.all(eq)
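# Illustrative usage of the helper above: a hedged sketch that is not part of
# the original module; the coordinate values are arbitrary example data.
def _example_skycoord_equal_usage():
    from astropy import units as u
    from astropy.coordinates import SkyCoord
    sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, frame="icrs")
    sc2 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, frame="icrs")
    # Same frame, representation, shape and component values -> True
    assert skycoord_equal(sc1, sc2)
    # Any differing component value makes the comparison False
    sc3 = SkyCoord([1, 2] * u.deg, [3, 5] * u.deg, frame="icrs")
    assert not skycoord_equal(sc1, sc3)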
|
37e23b333c4eefb14ed0af7871206edb08f61d4d09a10ce8794b8217c48dbbb6 | """
This series of functions are used to generate the reference CSV files
used by the accuracy tests. Running this as a command-line script will
generate them all.
"""
import os
import numpy as np
from astropy.table import Column, Table
def ref_fk4_no_e_fk4(fnout="fk4_no_e_fk4.csv"):
"""
Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK4
conversion, with arbitrary equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
    # Sample uniformly on the unit sphere. These will be either the FK4
    # coordinates for the transformation to FK4 (no E-terms), or the
    # FK4 (no E-terms) coordinates for the transformation to FK4.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
    # Generate random observation epochs
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
ra_fk4ne, dec_fk4ne = [], []
ra_fk4, dec_fk4 = [], []
for i in range(N):
# Set up frames for AST
frame_fk4ne = Ast.SkyFrame(f"System=FK4-NO-E,Epoch={obstime[i]},Equinox=B1950")
frame_fk4 = Ast.SkyFrame(f"System=FK4,Epoch={obstime[i]},Equinox=B1950")
# FK4 to FK4 (no E-terms)
frameset = frame_fk4.convert(frame_fk4ne)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4ne.append(coords[0, 0])
dec_fk4ne.append(coords[1, 0])
# FK4 (no E-terms) to FK4
frameset = frame_fk4ne.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk4ne", data=ra_fk4ne))
t.add_column(Column(name="dec_fk4ne", data=dec_fk4ne))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
f = open(os.path.join("data", fnout), "wb")
f.write(
f"# This file was generated with the {os.path.basename(__file__)} script, and"
" the reference values were computed using AST\n"
)
t.write(f, format="ascii", delimiter=",")
def ref_fk4_no_e_fk5(fnout="fk4_no_e_fk5.csv"):
"""
Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK5
conversion, with arbitrary equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the FK4
# coordinates for the transformation to FK5, or the FK5 coordinates for the
# transformation to FK4.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk4 = [f"B{x:7.2f}" for x in np.random.uniform(1925.0, 1975.0, N)]
equinox_fk5 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
ra_fk4, dec_fk4 = [], []
ra_fk5, dec_fk5 = [], []
for i in range(N):
# Set up frames for AST
frame_fk4 = Ast.SkyFrame(
f"System=FK4-NO-E,Epoch={obstime[i]},Equinox={equinox_fk4[i]}"
)
frame_fk5 = Ast.SkyFrame(
f"System=FK5,Epoch={obstime[i]},Equinox={equinox_fk5[i]}"
)
# FK4 to FK5
frameset = frame_fk4.convert(frame_fk5)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk5.append(coords[0, 0])
dec_fk5.append(coords[1, 0])
# FK5 to FK4
frameset = frame_fk5.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk4", data=equinox_fk4))
t.add_column(Column(name="equinox_fk5", data=equinox_fk5))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk5", data=ra_fk5))
t.add_column(Column(name="dec_fk5", data=dec_fk5))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
f = open(os.path.join("data", fnout), "wb")
f.write(
f"# This file was generated with the {os.path.basename(__file__)} script, and"
" the reference values were computed using AST\n"
)
t.write(f, format="ascii", delimiter=",")
def ref_galactic_fk4(fnout="galactic_fk4.csv"):
"""
    Accuracy tests for the Galactic to/from FK4 conversion, with arbitrary
    equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
    # Sample uniformly on the unit sphere. These will be either the Galactic
    # coordinates for the transformation to FK4, or the FK4 coordinates for the
    # transformation to Galactic.
lon = np.random.uniform(0.0, 360.0, N)
lat = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk4 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
lon_gal, lat_gal = [], []
ra_fk4, dec_fk4 = [], []
for i in range(N):
# Set up frames for AST
frame_gal = Ast.SkyFrame(f"System=Galactic,Epoch={obstime[i]}")
frame_fk4 = Ast.SkyFrame(
f"System=FK4,Epoch={obstime[i]},Equinox={equinox_fk4[i]}"
)
        # Galactic to FK4
frameset = frame_gal.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
        # FK4 to Galactic
frameset = frame_fk4.convert(frame_gal)
coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
lon_gal.append(coords[0, 0])
lat_gal.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk4", data=equinox_fk4))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="lon_in", data=lon))
t.add_column(Column(name="lat_in", data=lat))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
t.add_column(Column(name="lon_gal", data=lon_gal))
t.add_column(Column(name="lat_gal", data=lat_gal))
f = open(os.path.join("data", fnout), "wb")
f.write(
f"# This file was generated with the {os.path.basename(__file__)} script, and"
" the reference values were computed using AST\n"
)
t.write(f, format="ascii", delimiter=",")
def ref_icrs_fk5(fnout="icrs_fk5.csv"):
"""
    Accuracy tests for the ICRS to/from FK5 conversion, with arbitrary
    equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the ICRS
# coordinates for the transformation to FK5, or the FK5 coordinates for the
# transformation to ICRS.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk5 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
ra_icrs, dec_icrs = [], []
ra_fk5, dec_fk5 = [], []
for i in range(N):
# Set up frames for AST
frame_icrs = Ast.SkyFrame(f"System=ICRS,Epoch={obstime[i]}")
frame_fk5 = Ast.SkyFrame(
f"System=FK5,Epoch={obstime[i]},Equinox={equinox_fk5[i]}"
)
# ICRS to FK5
frameset = frame_icrs.convert(frame_fk5)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk5.append(coords[0, 0])
dec_fk5.append(coords[1, 0])
# FK5 to ICRS
frameset = frame_fk5.convert(frame_icrs)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_icrs.append(coords[0, 0])
dec_icrs.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk5", data=equinox_fk5))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk5", data=ra_fk5))
t.add_column(Column(name="dec_fk5", data=dec_fk5))
t.add_column(Column(name="ra_icrs", data=ra_icrs))
t.add_column(Column(name="dec_icrs", data=dec_icrs))
f = open(os.path.join("data", fnout), "wb")
f.write(
f"# This file was generated with the {os.path.basename(__file__)} script, and"
" the reference values were computed using AST\n"
)
t.write(f, format="ascii", delimiter=",")
if __name__ == "__main__":
ref_fk4_no_e_fk4()
ref_fk4_no_e_fk5()
ref_galactic_fk4()
ref_icrs_fk5()
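# Note (added for clarity): running this script requires the optional
# ``starlink.Ast`` module (provided by the ``starlink-pyast`` package, imported
# inside each function), and it writes the CSV files into a ``data/``
# subdirectory of the current working directory.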
|
ac9a8bb8df7223092faffd205ece957903bbf78da10a30c92482ed673d74f870 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import errno
import gzip
import http.client
import io
import mmap
import operator
import os
import re
import sys
import tempfile
import warnings
import zipfile
from functools import reduce
import numpy as np
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.data import (
_is_url,
_requires_fsspec,
download_file,
get_readable_fileobj,
)
from astropy.utils.decorators import classproperty
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from .util import (
_array_from_file,
_array_to_file,
_write_string,
fileobj_closed,
fileobj_mode,
fileobj_name,
isfile,
isreadable,
iswritable,
path_like,
)
if HAS_BZ2:
import bz2
# Maps astropy.io.fits-specific file mode names to the appropriate file
# modes to use for the underlying raw files.
IO_FITS_MODES = {
"readonly": "rb",
"copyonwrite": "rb",
"update": "rb+",
"append": "ab+",
"ostream": "wb",
"denywrite": "rb",
}
# Maps OS-level file modes to the appropriate astropy.io.fits specific mode
# to use when given file objects but no mode specified; obviously in
# IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite'
# both require the file to be opened in 'rb' mode. But 'readonly' is the
# default behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
"rb": "readonly",
"rb+": "update",
"wb": "ostream",
"wb+": "update",
"ab": "ostream",
"ab+": "append",
}
# A match indicates the file was opened in text mode, which is not allowed
TEXT_RE = re.compile(r"^[rwa]((t?\+?)|(\+?t?))$")
# readonly actually uses copyonwrite for mmap so that readonly without mmap and
# with mmap still have the same behavior with regard to updating the array. To
# get a truly readonly mmap use denywrite
# the name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {
"readonly": mmap.ACCESS_COPY,
"copyonwrite": mmap.ACCESS_COPY,
"update": mmap.ACCESS_WRITE,
"append": mmap.ACCESS_COPY,
"denywrite": mmap.ACCESS_READ,
}
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
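# Illustrative (hedged) example of how these modes surface in the public API:
# opening a file with ``fits.open(filename, memmap=True, mode="denywrite")``
# maps the data with ACCESS_READ, so the returned arrays are truly read-only,
# whereas the default ``mode="readonly"`` uses ACCESS_COPY and allows in-memory
# modification of the arrays without touching the file on disk.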
GZIP_MAGIC = b"\x1f\x8b\x08"
PKZIP_MAGIC = b"\x50\x4b\x03\x04"
BZIP2_MAGIC = b"\x42\x5a"
def _is_bz2file(fileobj):
if HAS_BZ2:
return isinstance(fileobj, bz2.BZ2File)
else:
return False
def _normalize_fits_mode(mode):
if mode is not None and mode not in IO_FITS_MODES:
if TEXT_RE.match(mode):
raise ValueError(
f"Text mode '{mode}' not supported: files must be opened in binary mode"
)
new_mode = FILE_MODES.get(mode)
if new_mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
mode = new_mode
return mode
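# Illustrative behaviour of the helper above (a sketch based on the mode
# tables defined earlier in this module):
#     _normalize_fits_mode("rb")        -> "readonly"
#     _normalize_fits_mode("rb+")       -> "update"
#     _normalize_fits_mode("readonly")  -> "readonly"  (already a FITS mode)
#     _normalize_fits_mode("r")         -> raises ValueError (text mode not allowed)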
class _File:
"""
Represents a FITS file on disk (or in some other file-like object).
"""
def __init__(
self,
fileobj=None,
mode=None,
memmap=None,
overwrite=False,
cache=True,
*,
use_fsspec=None,
fsspec_kwargs=None,
):
self.strict_memmap = bool(memmap)
memmap = True if memmap is None else memmap
self._file = None
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
self.compression = None
self.readonly = False
self.writeonly = False
# Should the object be closed on error: see
# https://github.com/astropy/astropy/issues/6168
self.close_on_error = False
# Holds mmap instance for files that use mmap
self._mmap = None
if fileobj is None:
self.simulateonly = True
return
else:
self.simulateonly = False
if isinstance(fileobj, os.PathLike):
fileobj = os.fspath(fileobj)
if mode is not None and mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
if isfile(fileobj):
objmode = _normalize_fits_mode(fileobj_mode(fileobj))
if mode is not None and mode != objmode:
raise ValueError(
"Requested FITS mode '{}' not compatible with open file "
"handle mode '{}'".format(mode, objmode)
)
mode = objmode
if mode is None:
mode = "readonly"
# Handle cloud-hosted files using the optional ``fsspec`` dependency
if (use_fsspec or _requires_fsspec(fileobj)) and mode != "ostream":
# Note: we don't use `get_readable_fileobj` as a context manager
# because io.fits takes care of closing files itself
fileobj = get_readable_fileobj(
fileobj,
encoding="binary",
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
close_files=False,
).__enter__()
# Handle raw URLs
if (
isinstance(fileobj, (str, bytes))
and mode not in ("ostream", "append", "update")
and _is_url(fileobj)
):
self.name = download_file(fileobj, cache=cache)
# Handle responses from URL requests that have already been opened
elif isinstance(fileobj, http.client.HTTPResponse):
if mode in ("ostream", "append", "update"):
raise ValueError(f"Mode {mode} not supported for HTTPResponse")
fileobj = io.BytesIO(fileobj.read())
else:
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
self.name = fileobj_name(fileobj)
self.mode = mode
        # Whether the underlying fileobj is a file-like object rather than an
        # actual file on disk; assume not until determined below
self.file_like = False
# Initialize the internal self._file object
if isfile(fileobj):
self._open_fileobj(fileobj, mode, overwrite)
elif isinstance(fileobj, (str, bytes)):
self._open_filename(fileobj, mode, overwrite)
else:
self._open_filelike(fileobj, mode, overwrite)
self.fileobj_mode = fileobj_mode(self._file)
if isinstance(fileobj, gzip.GzipFile):
self.compression = "gzip"
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
self.compression = "zip"
elif _is_bz2file(fileobj):
self.compression = "bzip2"
if mode in ("readonly", "copyonwrite", "denywrite") or (
self.compression and mode == "update"
):
self.readonly = True
elif mode == "ostream" or (self.compression and mode == "append"):
self.writeonly = True
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if mode == "ostream" or self.compression or not hasattr(self._file, "seek"):
# For output stream start with a truncated file.
# For compressed files we can't really guess at the size
self.size = 0
else:
pos = self._file.tell()
self._file.seek(0, 2)
self.size = self._file.tell()
self._file.seek(pos)
if self.memmap:
if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._mmap_available:
# Test mmap.flush--see
# https://github.com/astropy/astropy/issues/968
self.memmap = False
def __repr__(self):
return f"<{self.__module__}.{self.__class__.__name__} {self._file}>"
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self._file)
def read(self, size=None):
if not hasattr(self._file, "read"):
raise EOFError
try:
return self._file.read(size)
except OSError:
# On some versions of Python, it appears, GzipFile will raise an
# OSError if you try to read past its end (as opposed to just
# returning '')
if self.compression == "gzip":
return ""
raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self._file, "read"):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError(f"size {size} not a multiple of {dtype}")
if isinstance(shape, int):
shape = (shape,)
if not (size or shape):
warnings.warn(
"No size or shape given to readarray(); assuming a shape of (1,)",
AstropyUserWarning,
)
shape = (1,)
if size and not shape:
shape = (size // dtype.itemsize,)
if size and shape:
actualsize = np.prod(shape) * dtype.itemsize
if actualsize > size:
raise ValueError(
f"size {size} is too few bytes for a {shape} array of {dtype}"
)
elif actualsize < size:
raise ValueError(
f"size {size} is too many bytes for a {shape} array of {dtype}"
)
filepos = self._file.tell()
try:
if self.memmap:
if self._mmap is None:
# Instantiate Memmap array of the file offset at 0 (so we
# can return slices of it to offset anywhere else into the
# file)
access_mode = MEMMAP_MODES[self.mode]
# For reasons unknown the file needs to point to (near)
# the beginning or end of the file. No idea how close to
# the beginning or end.
# If I had to guess there is some bug in the mmap module
# of CPython or perhaps in microsoft's underlying code
# for generating the mmap.
self._file.seek(0, 0)
# This would also work:
# self._file.seek(0, 2) # moves to the end
try:
self._mmap = mmap.mmap(
self._file.fileno(), 0, access=access_mode, offset=0
)
except OSError as exc:
# NOTE: mode='readonly' results in the memory-mapping
# using the ACCESS_COPY mode in mmap so that users can
# modify arrays. However, on some systems, the OS raises
# a '[Errno 12] Cannot allocate memory' OSError if the
# address space is smaller than the file. The solution
# is to open the file in mode='denywrite', which at
# least allows the file to be opened even if the
# resulting arrays will be truly read-only.
if exc.errno == errno.ENOMEM and self.mode == "readonly":
warnings.warn(
"Could not memory map array with "
"mode='readonly', falling back to "
"mode='denywrite', which means that "
"the array will be read-only",
AstropyUserWarning,
)
self._mmap = mmap.mmap(
self._file.fileno(),
0,
access=MEMMAP_MODES["denywrite"],
offset=0,
)
else:
raise
return np.ndarray(
shape=shape, dtype=dtype, offset=offset, buffer=self._mmap
)
else:
count = reduce(operator.mul, shape)
self._file.seek(offset)
data = _array_from_file(self._file, dtype, count)
data.shape = shape
return data
finally:
# Make sure we leave the file in the position we found it; on
# some platforms (e.g. Windows) mmaping a file handle can also
# reset its file pointer.
# Also for Windows when using mmap seek() may return weird
# negative values, which is fixed by calling tell() before.
self._file.tell()
self._file.seek(filepos)
def writable(self):
if self.readonly:
return False
return iswritable(self._file)
def write(self, string):
if self.simulateonly:
return
if hasattr(self._file, "write"):
_write_string(self._file, string)
def writearray(self, array):
"""
Similar to file.write(), but writes a numpy array instead of a string.
Also like file.write(), a flush() or close() may be needed before
the file on disk reflects the data written.
"""
if self.simulateonly:
return
if hasattr(self._file, "write"):
_array_to_file(array, self._file)
def flush(self):
if self.simulateonly:
return
if hasattr(self._file, "flush"):
self._file.flush()
def seek(self, offset, whence=0):
if not hasattr(self._file, "seek"):
return
self._file.seek(offset, whence)
pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn(
"File may have been truncated: actual file length "
"({}) is smaller than the expected size ({})".format(self.size, pos),
AstropyUserWarning,
)
def tell(self):
if self.simulateonly:
raise OSError
if not hasattr(self._file, "tell"):
raise EOFError
return self._file.tell()
def truncate(self, size=None):
if hasattr(self._file, "truncate"):
self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self._file, "close"):
self._file.close()
self._maybe_close_mmap()
# Set self._memmap to None anyways since no new .data attributes can be
# loaded after the file is closed
self._mmap = None
self.closed = True
self.close_on_error = False
def _maybe_close_mmap(self, refcount_delta=0):
"""
When mmap is in use these objects hold a reference to the mmap of the
file (so there is only one, shared by all HDUs that reference this
file).
This will close the mmap if there are no arrays referencing it.
"""
# sys.getrefcount is CPython specific and not on PyPy.
if (
self._mmap is not None
and hasattr(sys, "getrefcount")
and sys.getrefcount(self._mmap) == 2 + refcount_delta
):
self._mmap.close()
self._mmap = None
def _overwrite_existing(self, overwrite, fileobj, closed):
"""Overwrite an existing file if ``overwrite`` is ``True``, otherwise
raise an OSError. The exact behavior of this method depends on the
_File object state and is only meant for use within the ``_open_*``
internal methods.
"""
# The file will be overwritten...
if (self.file_like and hasattr(fileobj, "len") and fileobj.len > 0) or (
os.path.exists(self.name) and os.path.getsize(self.name) != 0
):
if overwrite:
if self.file_like and hasattr(fileobj, "truncate"):
fileobj.truncate(0)
else:
if not closed:
fileobj.close()
os.remove(self.name)
else:
raise OSError(NOT_OVERWRITING_MSG.format(self.name))
def _try_read_compressed(self, obj_or_name, magic, mode, ext=""):
"""Attempt to determine if the given file is compressed."""
is_ostream = mode == "ostream"
if (is_ostream and ext == ".gz") or magic.startswith(GZIP_MAGIC):
if mode == "append":
raise OSError(
"'append' mode is not supported with gzip files."
"Use 'update' mode instead"
)
# Handle gzip files
kwargs = dict(mode=IO_FITS_MODES[mode])
if isinstance(obj_or_name, str):
kwargs["filename"] = obj_or_name
else:
kwargs["fileobj"] = obj_or_name
self._file = gzip.GzipFile(**kwargs)
self.compression = "gzip"
elif (is_ostream and ext == ".zip") or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
self.compression = "zip"
elif (is_ostream and ext == ".bz2") or magic.startswith(BZIP2_MAGIC):
# Handle bzip2 files
if mode in ["update", "append"]:
raise OSError(
"update and append modes are not supported with bzip2 files"
)
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
# bzip2 only supports 'w' and 'r' modes
bzip2_mode = "w" if is_ostream else "r"
self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode)
self.compression = "bzip2"
return self.compression is not None
def _open_fileobj(self, fileobj, mode, overwrite):
"""Open a FITS file from a file object (including compressed files)."""
closed = fileobj_closed(fileobj)
# FIXME: this variable was unused, check if it was useful
# fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode]
if mode == "ostream":
self._overwrite_existing(overwrite, fileobj, closed)
if not closed:
self._file = fileobj
elif isfile(fileobj):
self._file = open(self.name, IO_FITS_MODES[mode])
# Attempt to determine if the file represented by the open file object
# is compressed
try:
# We need to account for the possibility that the underlying file
# handle may have been opened with either 'ab' or 'ab+', which
# means that the current file position is at the end of the file.
if mode in ["ostream", "append"]:
self._file.seek(0)
magic = self._file.read(4)
# No matter whether the underlying file was opened with 'ab' or
# 'ab+', we need to return to the beginning of the file in order
# to properly process the FITS header (and handle the possibility
# of a compressed file).
self._file.seek(0)
except OSError:
return
self._try_read_compressed(fileobj, magic, mode)
def _open_filelike(self, fileobj, mode, overwrite):
"""Open a FITS file from a file-like object, i.e. one that has
read and/or write methods.
"""
self.file_like = True
self._file = fileobj
if fileobj_closed(fileobj):
raise OSError(
f"Cannot read from/write to a closed file-like object ({fileobj!r})."
)
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
# We can bypass any additional checks at this point since now
# self._file points to the temp file extracted from the zip
return
        # If there are no seek or tell methods then set the mode to
# output streaming.
if not hasattr(self._file, "seek") or not hasattr(self._file, "tell"):
self.mode = mode = "ostream"
if mode == "ostream":
self._overwrite_existing(overwrite, fileobj, False)
# Any "writeable" mode requires a write() method on the file object
if self.mode in ("update", "append", "ostream") and not hasattr(
self._file, "write"
):
raise OSError(
"File-like object does not have a 'write' "
"method, required for mode '{}'.".format(self.mode)
)
# Any mode except for 'ostream' requires readability
if self.mode != "ostream" and not hasattr(self._file, "read"):
raise OSError(
"File-like object does not have a 'read' "
"method, required for mode {!r}.".format(self.mode)
)
def _open_filename(self, filename, mode, overwrite):
"""Open a FITS file from a filename string."""
if mode == "ostream":
self._overwrite_existing(overwrite, None, True)
if os.path.exists(self.name):
with open(self.name, "rb") as f:
magic = f.read(4)
else:
magic = b""
ext = os.path.splitext(self.name)[1]
if not self._try_read_compressed(self.name, magic, mode, ext=ext):
self._file = open(self.name, IO_FITS_MODES[mode])
self.close_on_error = True
# Make certain we're back at the beginning of the file
# BZ2File does not support seek when the file is open for writing, but
# when opening a file for write, bz2.BZ2File always truncates anyway.
if not (_is_bz2file(self._file) and mode == "ostream"):
self._file.seek(0)
@classproperty(lazy=True)
def _mmap_available(cls):
"""Tests that mmap, and specifically mmap.flush works. This may
be the case on some uncommon platforms (see
https://github.com/astropy/astropy/issues/968).
If mmap.flush is found not to work, ``self.memmap = False`` is
set and a warning is issued.
"""
tmpfd, tmpname = tempfile.mkstemp()
try:
# Windows does not allow mappings on empty files
os.write(tmpfd, b" ")
os.fsync(tmpfd)
try:
mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
except OSError as exc:
warnings.warn(
f"Failed to create mmap: {exc}; mmap use will be disabled",
AstropyUserWarning,
)
del exc
return False
try:
mm.flush()
except OSError:
warnings.warn(
"mmap.flush is unavailable on this platform; "
"using mmap in writeable mode will be disabled",
AstropyUserWarning,
)
return False
finally:
mm.close()
finally:
os.close(tmpfd)
os.remove(tmpname)
return True
def _open_zipfile(self, fileobj, mode):
"""Limited support for zipfile.ZipFile objects containing a single
        file. Only reading is supported for now, by extracting the file to a
tempfile.
"""
if mode in ("update", "append"):
raise OSError("Writing to zipped fits files is not currently supported")
if not isinstance(fileobj, zipfile.ZipFile):
zfile = zipfile.ZipFile(fileobj)
close = True
else:
zfile = fileobj
close = False
namelist = zfile.namelist()
if len(namelist) != 1:
raise OSError("Zip files with multiple members are not supported.")
self._file = tempfile.NamedTemporaryFile(suffix=".fits")
self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
# We just wrote the contents of the first file in the archive to a new
# temp file, which now serves as our underlying file object. So it's
# necessary to reset the position back to the beginning
self._file.seek(0)
|
76590403e445d506aedd3ba5e10f2144d7659a54efbf19924429755883eaa6f8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import warnings
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.io import registry as io_registry
from astropy.table import Column, MaskedColumn, Table, meta, serialize
from astropy.time import Time
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import BinTableHDU, GroupsHDU, HDUList, TableHDU
from . import append as fits_append
from .column import KEYWORD_NAMES, _fortran_to_python_format
from .convenience import table_to_hdu
from .hdu.hdulist import FITS_SIGNATURE
from .hdu.hdulist import fitsopen as fits_open
from .util import first
# Keywords to remove for all tables that are read in
REMOVE_KEYWORDS = [
"XTENSION",
"BITPIX",
"NAXIS",
"NAXIS1",
"NAXIS2",
"PCOUNT",
"GCOUNT",
"TFIELDS",
"THEAP",
]
# Column-specific keywords regex
COLUMN_KEYWORD_REGEXP = "(" + "|".join(KEYWORD_NAMES) + ")[0-9]+"
def is_column_keyword(keyword):
return re.match(COLUMN_KEYWORD_REGEXP, keyword) is not None
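# Illustrative examples (hedged; they assume KEYWORD_NAMES contains the usual
# FITS table column keywords such as "TTYPE" and "TFORM"):
#     is_column_keyword("TTYPE1")  -> True   (column-specific keyword)
#     is_column_keyword("NAXIS2")  -> False  (structural keyword, see REMOVE_KEYWORDS)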
def is_fits(origin, filepath, fileobj, *args, **kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if fileobj is not None:
pos = fileobj.tell()
sig = fileobj.read(30)
fileobj.seek(pos)
return sig == FITS_SIGNATURE
elif filepath is not None:
if filepath.lower().endswith(
(".fits", ".fits.gz", ".fit", ".fit.gz", ".fts", ".fts.gz")
):
return True
return isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU))
def _decode_mixins(tbl):
"""Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into
the corresponding table with mixin columns (as appropriate).
"""
# If available read in __serialized_columns__ meta info which is stored
# in FITS COMMENTS between two sentinels.
try:
i0 = tbl.meta["comments"].index("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
i1 = tbl.meta["comments"].index("--END-ASTROPY-SERIALIZED-COLUMNS--")
except (ValueError, KeyError):
return tbl
# The YAML data are split into COMMENT cards, with lines longer than 70
# characters being split with a continuation character \ (backslash).
# Strip the backslashes and join together.
continuation_line = False
lines = []
for line in tbl.meta["comments"][i0 + 1 : i1]:
if continuation_line:
lines[-1] = lines[-1] + line[:70]
else:
lines.append(line[:70])
continuation_line = len(line) == 71
del tbl.meta["comments"][i0 : i1 + 1]
if not tbl.meta["comments"]:
del tbl.meta["comments"]
info = meta.get_header_from_yaml(lines)
# Add serialized column information to table meta for use in constructing mixins
tbl.meta["__serialized_columns__"] = info["meta"]["__serialized_columns__"]
# Use the `datatype` attribute info to update column attributes that are
# NOT already handled via standard FITS column keys (name, dtype, unit).
for col in info["datatype"]:
for attr in ["description", "meta"]:
if attr in col:
setattr(tbl[col["name"]].info, attr, col[attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
tbl = serialize._construct_mixins_from_columns(tbl)
return tbl
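# Illustrative sketch of the COMMENT-card layout that _decode_mixins expects;
# the YAML lines shown here are hypothetical and abbreviated:
#     COMMENT --BEGIN-ASTROPY-SERIALIZED-COLUMNS--
#     COMMENT datatype:
#     COMMENT - {name: coord.ra, unit: deg, datatype: float64}
#     COMMENT meta:
#     COMMENT   __serialized_columns__: {...}
#     COMMENT --END-ASTROPY-SERIALIZED-COLUMNS--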
def read_table_fits(
input,
hdu=None,
astropy_native=False,
memmap=False,
character_as_bytes=True,
unit_parse_strict="warn",
mask_invalid=True,
):
"""
    Read a Table object from a FITS file.
If the ``astropy_native`` argument is ``True``, then input FITS columns
which are representations of an astropy core object will be converted to
that class and stored in the ``Table`` as "mixin columns". Currently this
is limited to FITS columns which adhere to the FITS Time standard, in which
case they will be converted to a `~astropy.time.Time` column in the output
table.
Parameters
----------
input : str or file-like or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
astropy_native : bool, optional
Read in FITS columns as native astropy objects where possible instead
of standard Table Column objects. Default is False.
memmap : bool, optional
Whether to use memory mapping, which accesses data on disk as needed. If
you are only accessing part of the data, this is often more efficient.
If you want to access all the values in the table, and you are able to
fit the table in memory, you may be better off leaving memory mapping
off. However, if your table would not fit in memory, you should set this
to `True`.
When set to `True` then ``mask_invalid`` is set to `False` since the
masking would cause loading the full data array.
character_as_bytes : bool, optional
If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
and are converted on-the-fly to unicode strings when accessing
individual elements. If you need to use Numpy unicode arrays (dtype
``U``) internally, you should set this to `False`, but note that this
will use more memory. If set to `False`, string columns will not be
memory-mapped even if ``memmap`` is `True`.
unit_parse_strict : str, optional
Behaviour when encountering invalid column units in the FITS header.
Default is "warn", which will emit a ``UnitsWarning`` and create a
:class:`~astropy.units.core.UnrecognizedUnit`.
Values are the ones allowed by the ``parse_strict`` argument of
:class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.
mask_invalid : bool, optional
By default the code masks NaNs in float columns and empty strings in
string columns. Set this parameter to `False` to avoid the performance
penalty of doing this masking step. The masking is always deactivated
when using ``memmap=True`` (see above).
"""
if isinstance(input, HDUList):
# Parse all table objects
tables = dict()
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn(
"hdu= was not specified but multiple tables"
" are present, reading in first available"
f" table (hdu={first(tables)})",
AstropyUserWarning,
)
hdu = first(tables)
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError(f"No table found in hdu={hdu}")
elif len(tables) == 1:
if hdu is not None:
msg = None
try:
hdi = input.index_of(hdu)
except KeyError:
msg = f"Specified hdu={hdu} not found"
else:
if hdi >= len(input):
msg = f"Specified hdu={hdu} not found"
elif hdi not in tables:
msg = f"No table found in specified hdu={hdu}"
if msg is not None:
warnings.warn(
f"{msg}, reading in first available table "
f"(hdu={first(tables)}) instead. This will"
" result in an error in future versions!",
AstropyDeprecationWarning,
)
table = tables[first(tables)]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
if memmap:
            # using memmap is not compatible with masking invalid values by
# default so we deactivate the masking
mask_invalid = False
hdulist = fits_open(input, character_as_bytes=character_as_bytes, memmap=memmap)
try:
return read_table_fits(
hdulist,
hdu=hdu,
astropy_native=astropy_native,
unit_parse_strict=unit_parse_strict,
mask_invalid=mask_invalid,
)
finally:
hdulist.close()
# In the loop below we access the data using data[col.name] rather than
# col.array to make sure that the data is scaled correctly if needed.
data = table.data
columns = []
for col in data.columns:
# Check if column is masked. Here, we make a guess based on the
# presence of FITS mask values. For integer columns, this is simply
# the null header, for float and complex, the presence of NaN, and for
# string, empty strings.
# Since Multi-element columns with dtypes such as '2f8' have a subdtype,
# we should look up the type of column on that.
masked = mask = False
coltype = col.dtype.subdtype[0].type if col.dtype.subdtype else col.dtype.type
if col.null is not None:
mask = data[col.name] == col.null
# Return a MaskedColumn even if no elements are masked so
# we roundtrip better.
masked = True
elif mask_invalid and issubclass(coltype, np.inexact):
mask = np.isnan(data[col.name])
elif mask_invalid and issubclass(coltype, np.character):
mask = col.array == b""
if masked or np.any(mask):
column = MaskedColumn(
data=data[col.name], name=col.name, mask=mask, copy=False
)
else:
column = Column(data=data[col.name], name=col.name, copy=False)
# Copy over units
if col.unit is not None:
column.unit = u.Unit(
col.unit, format="fits", parse_strict=unit_parse_strict
)
# Copy over display format
if col.disp is not None:
column.format = _fortran_to_python_format(col.disp)
columns.append(column)
# Create Table object
t = Table(columns, copy=False)
# TODO: deal properly with unsigned integers
hdr = table.header
if astropy_native:
# Avoid circular imports, and also only import if necessary.
from .fitstime import fits_to_time
hdr = fits_to_time(hdr, t)
for key, value, comment in hdr.cards:
if key in ["COMMENT", "HISTORY"]:
# Convert to io.ascii format
if key == "COMMENT":
key = "comments"
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif is_column_keyword(key) or key in REMOVE_KEYWORDS:
pass
else:
t.meta[key] = value
# TODO: implement masking
# Decode any mixin columns that have been stored as standard Columns.
t = _decode_mixins(t)
return t
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
# Determine if information will be lost without serializing meta. This is hardcoded
# to the set difference between column info attributes and what FITS can store
# natively (name, dtype, unit). See _get_col_attributes() in table/meta.py for where
# this comes from.
info_lost = any(
any(
getattr(col.info, attr, None) not in (None, {})
for attr in ("description", "meta")
)
for col in tbl.itercols()
)
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table. This ignores
    # Time-subclass columns and leaves them in the table so that the downstream
# FITS Time handling does the right thing.
with serialize_context_as("fits"):
encode_tbl = serialize.represent_mixins_as_columns(tbl, exclude_classes=(Time,))
# If the encoded table is unchanged then there were no mixins. But if there
# is column metadata (format, description, meta) that would be lost, then
# still go through the serialized columns machinery.
if encode_tbl is tbl and not info_lost:
return tbl
# Copy the meta dict if it was not copied by represent_mixins_as_columns.
# We will modify .meta['comments'] below and we do not want to see these
# comments in the input table.
if encode_tbl is tbl:
meta_copy = deepcopy(tbl.meta)
encode_tbl = Table(tbl.columns, meta=meta_copy, copy=False)
# Get the YAML serialization of information describing the table columns.
    # This re-uses ECSV code that combines the existing table.meta with
# the extra __serialized_columns__ key. For FITS the table.meta is handled
# by the native FITS connect code, so don't include that in the YAML
# output.
ser_col = "__serialized_columns__"
# encode_tbl might not have a __serialized_columns__ key if there were no mixins,
# but machinery below expects it to be available, so just make an empty dict.
encode_tbl.meta.setdefault(ser_col, {})
tbl_meta_copy = encode_tbl.meta.copy()
try:
encode_tbl.meta = {ser_col: encode_tbl.meta[ser_col]}
meta_yaml_lines = meta.get_yaml_from_table(encode_tbl)
finally:
encode_tbl.meta = tbl_meta_copy
del encode_tbl.meta[ser_col]
if "comments" not in encode_tbl.meta:
encode_tbl.meta["comments"] = []
encode_tbl.meta["comments"].append("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
for line in meta_yaml_lines:
if len(line) == 0:
lines = [""]
else:
# Split line into 70 character chunks for COMMENT cards
idxs = list(range(0, len(line) + 70, 70))
lines = [line[i0:i1] + "\\" for i0, i1 in zip(idxs[:-1], idxs[1:])]
lines[-1] = lines[-1][:-1]
encode_tbl.meta["comments"].extend(lines)
encode_tbl.meta["comments"].append("--END-ASTROPY-SERIALIZED-COLUMNS--")
return encode_tbl
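# A minimal, self-contained sketch of the 70-character COMMENT-card chunking done
# inside _encode_mixins above (illustrative only; the 150-character line is an
# assumed example). Long YAML lines are split into chunks that end in a backslash
# continuation marker, except for the final chunk:
#
#     line = "x" * 150
#     idxs = list(range(0, len(line) + 70, 70))      # [0, 70, 140, 210]
#     chunks = [line[i0:i1] + "\\" for i0, i1 in zip(idxs[:-1], idxs[1:])]
#     chunks[-1] = chunks[-1][:-1]                   # last chunk loses the backslash
#     # -> 3 chunks: two of 71 characters (70 + "\") and a final one of 10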
def write_table_fits(input, output, overwrite=False, append=False):
"""
Write a Table object to a FITS file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
append : bool
        Whether to append the table to an existing file.
"""
# Encode any mixin columns into standard Columns.
input = _encode_mixins(input)
table_hdu = table_to_hdu(input, character_as_bytes=True)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
elif not append:
raise OSError(NOT_OVERWRITING_MSG.format(output))
if append:
# verify=False stops it reading and checking the existing file.
fits_append(output, table_hdu.data, table_hdu.header, verify=False)
else:
table_hdu.writeto(output)
io_registry.register_reader("fits", Table, read_table_fits)
io_registry.register_writer("fits", Table, write_table_fits)
io_registry.register_identifier("fits", Table, is_fits)
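# The three registrations above are what expose this module through the unified
# Table I/O interface. A minimal usage sketch (the file name is hypothetical):
#
#     from astropy.table import Table
#
#     t = Table({"a": [1, 2, 3]})
#     t.write("data.fits", format="fits", overwrite=True)  # -> write_table_fits
#     t2 = Table.read("data.fits", format="fits")          # -> read_table_fits
#     t.write("data.fits", format="fits", append=True)     # append a new table HDU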
|
a9eb7d65967f32518b7cbf8bfd58f0fbafdf59880dfe8278c80e6597fd1a090e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Facilities for diffing two FITS files. Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.
Used to implement the fitsdiff program.
"""
import fnmatch
import glob
import io
import operator
import os
import os.path
import textwrap
from collections import defaultdict
from inspect import signature
from itertools import islice
import numpy as np
from astropy import __version__
from astropy.utils.diff import (
diff_values,
fixed_width_indent,
report_diff_values,
where_not_allclose,
)
from astropy.utils.misc import NOT_OVERWRITING_MSG
from .card import BLANK_CARD, Card
# HDUList is used in one of the doctests
from .hdu.hdulist import HDUList, fitsopen # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from .header import Header
from .util import path_like
__all__ = [
"FITSDiff",
"HDUDiff",
"HeaderDiff",
"ImageDataDiff",
"RawDataDiff",
"TableDataDiff",
]
# Column attributes of interest for comparison
_COL_ATTRS = [
("unit", "units"),
("null", "null values"),
("bscale", "bscales"),
("bzero", "bzeros"),
("disp", "display formats"),
("dim", "dimensions"),
]
class _BaseDiff:
"""
Base class for all FITS diff objects.
When instantiating a FITS diff object, the first two arguments are always
the two objects to diff (two FITS files, two FITS headers, etc.).
Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
    The returned ``_BaseDiff`` instance has a number of attributes that describe
the results of the diff operation.
The most basic attribute, present on all ``_BaseDiff`` instances, is
``.identical`` which is `True` if the two objects being compared are
identical according to the diff method for objects of that type.
"""
def __init__(self, a, b):
"""
The ``_BaseDiff`` class does not implement a ``_diff`` method and
should not be instantiated directly. Instead instantiate the
appropriate subclass of ``_BaseDiff`` for the objects being compared
        (for example, use `HeaderDiff` to compare two `Header` objects).
"""
self.a = a
self.b = b
# For internal use in report output
self._fileobj = None
self._indent = 0
self._diff()
def __bool__(self):
"""
A ``_BaseDiff`` object acts as `True` in a boolean context if the two
objects compared are different. Otherwise it acts as `False`.
"""
return not self.identical
@classmethod
def fromdiff(cls, other, a, b):
"""
Returns a new Diff object of a specific subclass from an existing diff
object, passing on the values for any arguments they share in common
(such as ignore_keywords).
For example::
>>> from astropy.io import fits
>>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
>>> headera, headerb = fits.Header(), fits.Header()
>>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
>>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
>>> list(hd.ignore_keywords)
['*']
"""
sig = signature(cls.__init__)
# The first 3 arguments of any Diff initializer are self, a, and b.
kwargs = {}
for arg in list(sig.parameters.keys())[3:]:
if hasattr(other, arg):
kwargs[arg] = getattr(other, arg)
return cls(a, b, **kwargs)
@property
def identical(self):
"""
`True` if all the ``.diff_*`` attributes on this diff instance are
empty, implying that no differences were found.
Any subclass of ``_BaseDiff`` must have at least one ``.diff_*``
attribute, which contains a non-empty value if and only if some
difference was found between the two objects being compared.
"""
return not any(
getattr(self, attr) for attr in self.__dict__ if attr.startswith("diff_")
)
def report(self, fileobj=None, indent=0, overwrite=False):
"""
Generates a text report on the differences (if any) between two
objects, and either returns it as a string or writes it to a file-like
object.
Parameters
----------
fileobj : file-like, string, or None, optional
If `None`, this method returns the report as a string. Otherwise it
returns `None` and writes the report to the given file-like object
(which must have a ``.write()`` method at a minimum), or to a new
file at the path specified.
indent : int
The number of 4 space tabs to indent the report.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Returns
-------
report : str or None
"""
return_string = False
filepath = None
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
if os.path.exists(fileobj) and not overwrite:
raise OSError(NOT_OVERWRITING_MSG.format(fileobj))
else:
filepath = fileobj
fileobj = open(filepath, "w")
elif fileobj is None:
fileobj = io.StringIO()
return_string = True
self._fileobj = fileobj
self._indent = indent # This is used internally by _writeln
try:
self._report()
finally:
if filepath:
fileobj.close()
if return_string:
return fileobj.getvalue()
def _writeln(self, text):
self._fileobj.write(fixed_width_indent(text, self._indent) + "\n")
def _diff(self):
raise NotImplementedError
def _report(self):
raise NotImplementedError
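# A minimal sketch of the report() behaviour shared by every diff class below,
# using HeaderDiff (defined further down in this module) as a concrete subclass;
# the output path is hypothetical:
#
#     ha = Header([("NAXIS", 2)])
#     hb = Header([("NAXIS", 3)])
#     diff = HeaderDiff(ha, hb)
#     text = diff.report()                      # fileobj=None -> returned as a str
#     diff.report("diff.txt", overwrite=True)   # or written to a file instead
#     bool(diff)                                # True, because the headers differ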
class FITSDiff(_BaseDiff):
"""Diff two FITS files by filename, or two `HDUList` objects.
`FITSDiff` objects have the following diff attributes:
- ``diff_hdu_count``: If the FITS files being compared have different
numbers of HDUs, this contains a 2-tuple of the number of HDUs in each
file.
- ``diff_hdus``: If any HDUs with the same index are different, this
contains a list of 2-tuples of the HDU index and the `HDUDiff` object
representing the differences between the two HDUs.
"""
def __init__(
self,
a,
b,
ignore_hdus=[],
ignore_keywords=[],
ignore_comments=[],
ignore_fields=[],
numdiffs=10,
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object.
b : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object to
compare to the first file.
ignore_hdus : sequence, optional
HDU names to ignore when comparing two FITS files or HDU lists; the
presence of these HDUs and their contents are ignored. Wildcard
strings may also be included in the list.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
if isinstance(a, (str, os.PathLike)):
try:
a = fitsopen(a)
except Exception as exc:
raise OSError(
"error opening file a ({}): {}: {}".format(
a, exc.__class__.__name__, exc.args[0]
)
)
close_a = True
else:
close_a = False
if isinstance(b, (str, os.PathLike)):
try:
b = fitsopen(b)
except Exception as exc:
raise OSError(
"error opening file b ({}): {}: {}".format(
b, exc.__class__.__name__, exc.args[0]
)
)
close_b = True
else:
close_b = False
# Normalize keywords/fields to ignore to upper case
self.ignore_hdus = {k.upper() for k in ignore_hdus}
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
# Some hdu names may be pattern wildcards. Find them.
self.ignore_hdu_patterns = set()
for name in list(self.ignore_hdus):
if name != "*" and glob.has_magic(name):
self.ignore_hdus.remove(name)
self.ignore_hdu_patterns.add(name)
self.diff_hdu_count = ()
self.diff_hdus = []
try:
super().__init__(a, b)
finally:
if close_a:
a.close()
if close_b:
b.close()
def _diff(self):
if len(self.a) != len(self.b):
self.diff_hdu_count = (len(self.a), len(self.b))
# Record filenames for use later in _report
self.filenamea = self.a.filename()
if not self.filenamea:
self.filenamea = f"<{self.a.__class__.__name__} object at {id(self.a):#x}>"
self.filenameb = self.b.filename()
if not self.filenameb:
self.filenameb = f"<{self.b.__class__.__name__} object at {id(self.b):#x}>"
if self.ignore_hdus:
self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus])
self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus])
if self.ignore_hdu_patterns:
a_names = [hdu.name for hdu in self.a]
b_names = [hdu.name for hdu in self.b]
for pattern in self.ignore_hdu_patterns:
self.a = HDUList(
[
h
for h in self.a
if h.name not in fnmatch.filter(a_names, pattern)
]
)
self.b = HDUList(
[
h
for h in self.b
if h.name not in fnmatch.filter(b_names, pattern)
]
)
# For now, just compare the extensions one by one in order.
# Might allow some more sophisticated types of diffing later.
# TODO: Somehow or another simplify the passing around of diff
# options--this will become important as the number of options grows
for idx in range(min(len(self.a), len(self.b))):
hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx])
if not hdu_diff.identical:
if (
self.a[idx].name == self.b[idx].name
and self.a[idx].ver == self.b[idx].ver
):
self.diff_hdus.append(
(idx, hdu_diff, self.a[idx].name, self.a[idx].ver)
)
else:
self.diff_hdus.append((idx, hdu_diff, "", self.a[idx].ver))
def _report(self):
wrapper = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ")
self._fileobj.write("\n")
self._writeln(f" fitsdiff: {__version__}")
self._writeln(f" a: {self.filenamea}\n b: {self.filenameb}")
if self.ignore_hdus:
ignore_hdus = " ".join(sorted(self.ignore_hdus))
self._writeln(" HDU(s) not to be compared:\n" + wrapper.fill(ignore_hdus))
if self.ignore_hdu_patterns:
ignore_hdu_patterns = " ".join(sorted(self.ignore_hdu_patterns))
self._writeln(
" HDU(s) not to be compared:\n" + wrapper.fill(ignore_hdu_patterns)
)
if self.ignore_keywords:
ignore_keywords = " ".join(sorted(self.ignore_keywords))
self._writeln(
" Keyword(s) not to be compared:\n" + wrapper.fill(ignore_keywords)
)
if self.ignore_comments:
ignore_comments = " ".join(sorted(self.ignore_comments))
self._writeln(
" Keyword(s) whose comments are not to be compared:\n"
+ wrapper.fill(ignore_comments)
)
if self.ignore_fields:
ignore_fields = " ".join(sorted(self.ignore_fields))
self._writeln(
" Table column(s) not to be compared:\n" + wrapper.fill(ignore_fields)
)
self._writeln(
f" Maximum number of different data values to be reported: {self.numdiffs}"
)
self._writeln(
f" Relative tolerance: {self.rtol}, Absolute tolerance: {self.atol}"
)
if self.diff_hdu_count:
self._fileobj.write("\n")
self._writeln("Files contain different numbers of HDUs:")
self._writeln(f" a: {self.diff_hdu_count[0]}")
self._writeln(f" b: {self.diff_hdu_count[1]}")
if not self.diff_hdus:
self._writeln("No differences found between common HDUs.")
return
elif not self.diff_hdus:
self._fileobj.write("\n")
self._writeln("No differences found.")
return
for idx, hdu_diff, extname, extver in self.diff_hdus:
# print out the extension heading
if idx == 0:
self._fileobj.write("\n")
self._writeln("Primary HDU:")
else:
self._fileobj.write("\n")
if extname:
self._writeln(f"Extension HDU {idx} ({extname}, {extver}):")
else:
self._writeln(f"Extension HDU {idx}:")
hdu_diff.report(self._fileobj, indent=self._indent + 1)
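# A minimal sketch comparing two in-memory HDU lists (file names work the same
# way); the values shown in the trailing comments are the expected results for
# this made-up example:
#
#     import numpy as np
#     from astropy.io import fits
#
#     hdula = fits.HDUList([fits.PrimaryHDU(np.zeros((2, 2)))])
#     hdulb = fits.HDUList([fits.PrimaryHDU(np.ones((2, 2)))])
#     diff = FITSDiff(hdula, hdulb, numdiffs=5, rtol=1e-6)
#     diff.identical          # False -- the primary HDU data differ
#     diff.diff_hdus[0][0]    # 0 -- index of the differing HDU
#     print(diff.report())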
class HDUDiff(_BaseDiff):
"""
    Diff two HDU objects, including their headers and their data (but only if
    both HDUs contain the same type of data: image, table, or unknown).
`HDUDiff` objects have the following diff attributes:
- ``diff_extnames``: If the two HDUs have different EXTNAME values, this
contains a 2-tuple of the different extension names.
- ``diff_extvers``: If the two HDUS have different EXTVER values, this
contains a 2-tuple of the different extension versions.
- ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this
contains a 2-tuple of the different extension levels.
- ``diff_extension_types``: If the two HDUs have different XTENSION values,
this contains a 2-tuple of the different extension types.
- ``diff_headers``: Contains a `HeaderDiff` object for the headers of the
two HDUs. This will always contain an object--it may be determined
whether the headers are different through ``diff_headers.identical``.
- ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or
`RawDataDiff` as appropriate for the data in the HDUs, and only if the
two HDUs have non-empty data of the same type (`RawDataDiff` is used for
HDUs containing non-empty data of an indeterminate type).
"""
def __init__(
self,
a,
b,
ignore_keywords=[],
ignore_comments=[],
ignore_fields=[],
numdiffs=10,
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.rtol = rtol
self.atol = atol
self.numdiffs = numdiffs
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.diff_extnames = ()
self.diff_extvers = ()
self.diff_extlevels = ()
self.diff_extension_types = ()
self.diff_headers = None
self.diff_data = None
super().__init__(a, b)
def _diff(self):
if self.a.name != self.b.name:
self.diff_extnames = (self.a.name, self.b.name)
if self.a.ver != self.b.ver:
self.diff_extvers = (self.a.ver, self.b.ver)
if self.a.level != self.b.level:
self.diff_extlevels = (self.a.level, self.b.level)
if self.a.header.get("XTENSION") != self.b.header.get("XTENSION"):
self.diff_extension_types = (
self.a.header.get("XTENSION"),
self.b.header.get("XTENSION"),
)
self.diff_headers = HeaderDiff.fromdiff(
self, self.a.header.copy(), self.b.header.copy()
)
if self.a.data is None or self.b.data is None:
# TODO: Perhaps have some means of marking this case
pass
elif self.a.is_image and self.b.is_image:
self.diff_data = ImageDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif isinstance(self.a, _TableLikeHDU) and isinstance(self.b, _TableLikeHDU):
# TODO: Replace this if/when _BaseHDU grows a .is_table property
self.diff_data = TableDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif not self.diff_extension_types:
# Don't diff the data for unequal extension types that are not
# recognized image or table types
self.diff_data = RawDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
def _report(self):
if self.identical:
self._writeln(" No differences found.")
if self.diff_extension_types:
self._writeln(
" Extension types differ:\n a: {}\n b: {}".format(
*self.diff_extension_types
)
)
if self.diff_extnames:
self._writeln(
" Extension names differ:\n a: {}\n b: {}".format(*self.diff_extnames)
)
if self.diff_extvers:
self._writeln(
" Extension versions differ:\n a: {}\n b: {}".format(
*self.diff_extvers
)
)
if self.diff_extlevels:
self._writeln(
" Extension levels differ:\n a: {}\n b: {}".format(
*self.diff_extlevels
)
)
if not self.diff_headers.identical:
self._fileobj.write("\n")
self._writeln(" Headers contain differences:")
self.diff_headers.report(self._fileobj, indent=self._indent + 1)
if self.diff_data is not None and not self.diff_data.identical:
self._fileobj.write("\n")
self._writeln(" Data contains differences:")
self.diff_data.report(self._fileobj, indent=self._indent + 1)
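# A minimal sketch of diffing two individual image HDUs; the data comparison is
# delegated to ImageDataDiff via the diff_data attribute (expected results shown
# in the trailing comments):
#
#     import numpy as np
#     from astropy.io import fits
#
#     hdua = fits.ImageHDU(np.arange(4.0), name="SCI")
#     hdub = fits.ImageHDU(np.arange(4.0) + 1.0, name="SCI")
#     diff = HDUDiff(hdua, hdub, rtol=1e-8)
#     diff.diff_headers.identical   # True -- only the data differ
#     diff.diff_data.diff_total     # 4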
class HeaderDiff(_BaseDiff):
"""
Diff two `Header` objects.
`HeaderDiff` objects have the following diff attributes:
- ``diff_keyword_count``: If the two headers contain a different number of
keywords, this contains a 2-tuple of the keyword count for each header.
- ``diff_keywords``: If either header contains one or more keywords that
don't appear at all in the other header, this contains a 2-tuple
consisting of a list of the keywords only appearing in header a, and a
list of the keywords only appearing in header b.
- ``diff_duplicate_keywords``: If a keyword appears in both headers at
least once, but contains a different number of duplicates (for example, a
different number of HISTORY cards in each header), an item is added to
this dict with the keyword as the key, and a 2-tuple of the different
counts of that keyword as the value. For example::
{'HISTORY': (20, 19)}
means that header a contains 20 HISTORY cards, while header b contains
only 19 HISTORY cards.
    - ``diff_keyword_values``: If any of the common keywords between the two
headers have different values, they appear in this dict. It has a
structure similar to ``diff_duplicate_keywords``, with the keyword as the
key, and a 2-tuple of the different values as the value. For example::
{'NAXIS': (2, 3)}
means that the NAXIS keyword has a value of 2 in header a, and a value of
3 in header b. This excludes any keywords matched by the
``ignore_keywords`` list.
- ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains
differences between keyword comments.
`HeaderDiff` objects also have a ``common_keywords`` attribute that lists
all keywords that appear in both headers.
"""
def __init__(
self,
a,
b,
ignore_keywords=[],
ignore_comments=[],
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : `~astropy.io.fits.Header` or string or bytes
A header.
b : `~astropy.io.fits.Header` or string or bytes
A header to compare to the first header.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.ignore_keyword_patterns = set()
self.ignore_comment_patterns = set()
for keyword in list(self.ignore_keywords):
keyword = keyword.upper()
if keyword != "*" and glob.has_magic(keyword):
self.ignore_keywords.remove(keyword)
self.ignore_keyword_patterns.add(keyword)
for keyword in list(self.ignore_comments):
keyword = keyword.upper()
if keyword != "*" and glob.has_magic(keyword):
self.ignore_comments.remove(keyword)
self.ignore_comment_patterns.add(keyword)
# Keywords appearing in each header
self.common_keywords = []
# Set to the number of keywords in each header if the counts differ
self.diff_keyword_count = ()
# Set if the keywords common to each header (excluding ignore_keywords)
# appear in different positions within the header
# TODO: Implement this
self.diff_keyword_positions = ()
# Keywords unique to each header (excluding keywords in
# ignore_keywords)
self.diff_keywords = ()
# Keywords that have different numbers of duplicates in each header
# (excluding keywords in ignore_keywords)
self.diff_duplicate_keywords = {}
# Keywords common to each header but having different values (excluding
# keywords in ignore_keywords)
self.diff_keyword_values = defaultdict(list)
# Keywords common to each header but having different comments
# (excluding keywords in ignore_keywords or in ignore_comments)
self.diff_keyword_comments = defaultdict(list)
if isinstance(a, str):
a = Header.fromstring(a)
if isinstance(b, str):
b = Header.fromstring(b)
if not (isinstance(a, Header) and isinstance(b, Header)):
raise TypeError(
"HeaderDiff can only diff astropy.io.fits.Header "
"objects or strings containing FITS headers."
)
super().__init__(a, b)
# TODO: This doesn't pay much attention to the *order* of the keywords,
# except in the case of duplicate keywords. The order should be checked
# too, or at least it should be an option.
def _diff(self):
if self.ignore_blank_cards:
cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
else:
cardsa = list(self.a.cards)
cardsb = list(self.b.cards)
# build dictionaries of keyword values and comments
def get_header_values_comments(cards):
values = {}
comments = {}
for card in cards:
value = card.value
if self.ignore_blanks and isinstance(value, str):
value = value.rstrip()
values.setdefault(card.keyword, []).append(value)
comments.setdefault(card.keyword, []).append(card.comment)
return values, comments
valuesa, commentsa = get_header_values_comments(cardsa)
valuesb, commentsb = get_header_values_comments(cardsb)
# Normalize all keyword to upper-case for comparison's sake;
# TODO: HIERARCH keywords should be handled case-sensitively I think
keywordsa = {k.upper() for k in valuesa}
keywordsb = {k.upper() for k in valuesb}
self.common_keywords = sorted(keywordsa.intersection(keywordsb))
if len(cardsa) != len(cardsb):
self.diff_keyword_count = (len(cardsa), len(cardsb))
# Any other diff attributes should exclude ignored keywords
keywordsa = keywordsa.difference(self.ignore_keywords)
keywordsb = keywordsb.difference(self.ignore_keywords)
if self.ignore_keyword_patterns:
for pattern in self.ignore_keyword_patterns:
keywordsa = keywordsa.difference(fnmatch.filter(keywordsa, pattern))
keywordsb = keywordsb.difference(fnmatch.filter(keywordsb, pattern))
if "*" in self.ignore_keywords:
# Any other differences between keywords are to be ignored
return
left_only_keywords = sorted(keywordsa.difference(keywordsb))
right_only_keywords = sorted(keywordsb.difference(keywordsa))
if left_only_keywords or right_only_keywords:
self.diff_keywords = (left_only_keywords, right_only_keywords)
# Compare count of each common keyword
for keyword in self.common_keywords:
if keyword in self.ignore_keywords:
continue
if self.ignore_keyword_patterns:
skip = False
for pattern in self.ignore_keyword_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
counta = len(valuesa[keyword])
countb = len(valuesb[keyword])
if counta != countb:
self.diff_duplicate_keywords[keyword] = (counta, countb)
# Compare keywords' values and comments
for a, b in zip(valuesa[keyword], valuesb[keyword]):
if diff_values(a, b, rtol=self.rtol, atol=self.atol):
self.diff_keyword_values[keyword].append((a, b))
else:
# If there are duplicate keywords we need to be able to
# index each duplicate; if the values of a duplicate
# are identical use None here
self.diff_keyword_values[keyword].append(None)
if not any(self.diff_keyword_values[keyword]):
# No differences found; delete the array of Nones
del self.diff_keyword_values[keyword]
if "*" in self.ignore_comments or keyword in self.ignore_comments:
continue
if self.ignore_comment_patterns:
skip = False
for pattern in self.ignore_comment_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
for a, b in zip(commentsa[keyword], commentsb[keyword]):
if diff_values(a, b):
self.diff_keyword_comments[keyword].append((a, b))
else:
self.diff_keyword_comments[keyword].append(None)
if not any(self.diff_keyword_comments[keyword]):
del self.diff_keyword_comments[keyword]
def _report(self):
if self.diff_keyword_count:
self._writeln(" Headers have different number of cards:")
self._writeln(f" a: {self.diff_keyword_count[0]}")
self._writeln(f" b: {self.diff_keyword_count[1]}")
if self.diff_keywords:
for keyword in self.diff_keywords[0]:
if keyword in Card._commentary_keywords:
val = self.a[keyword][0]
else:
val = self.a[keyword]
self._writeln(f" Extra keyword {keyword!r:8} in a: {val!r}")
for keyword in self.diff_keywords[1]:
if keyword in Card._commentary_keywords:
val = self.b[keyword][0]
else:
val = self.b[keyword]
self._writeln(f" Extra keyword {keyword!r:8} in b: {val!r}")
if self.diff_duplicate_keywords:
for keyword, count in sorted(self.diff_duplicate_keywords.items()):
self._writeln(f" Inconsistent duplicates of keyword {keyword!r:8}:")
self._writeln(
                    " Occurs {} time(s) in a, {} time(s) in b".format(*count)
)
if self.diff_keyword_values or self.diff_keyword_comments:
for keyword in self.common_keywords:
report_diff_keyword_attr(
self._fileobj,
"values",
self.diff_keyword_values,
keyword,
ind=self._indent,
)
report_diff_keyword_attr(
self._fileobj,
"comments",
self.diff_keyword_comments,
keyword,
ind=self._indent,
)
# TODO: It might be good if there was also a threshold option for percentage of
# different pixels: For example ignore if only 1% of the pixels are different
# within some threshold. There are lots of possibilities here, but hold off
# for now until specific cases come up.
class ImageDataDiff(_BaseDiff):
"""
Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE
extension HDU, though the data unit is assumed to be "pixels").
`ImageDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: If the two arrays contain either a different number
of dimensions or different sizes in any dimension, this contains a
2-tuple of the shapes of each array. Currently no further comparison is
performed on images that don't have the exact same dimensions.
- ``diff_pixels``: If the two images contain any different pixels, this
contains a list of 2-tuples of the array index where the difference was
found, and another 2-tuple containing the different values. For example,
if the pixel at (0, 0) contains different values this would look like::
[(0, 0), (1.1, 2.2)]
where 1.1 and 2.2 are the values of that pixel in each array. This
array only contains up to ``self.numdiffs`` differences, for storage
efficiency.
- ``diff_total``: The total number of different pixels found between the
arrays. Although ``diff_pixels`` does not necessarily contain all the
different pixel values, this can be used to get a count of the total
number of differences found.
- ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
of pixels in the arrays.
"""
def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.diff_dimensions = ()
self.diff_pixels = []
self.diff_ratio = 0
        # self.diff_pixels only holds up to numdiffs differing pixel values;
        # self.diff_total stores the total count of differences between
        # the images, but not the differing values themselves
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
if self.a.shape != self.b.shape:
self.diff_dimensions = (self.a.shape, self.b.shape)
# Don't do any further comparison if the dimensions differ
# TODO: Perhaps we could, however, diff just the intersection
# between the two images
return
# Find the indices where the values are not equal
# If neither a nor b are floating point (or complex), ignore rtol and
# atol
if not (
np.issubdtype(self.a.dtype, np.inexact)
or np.issubdtype(self.b.dtype, np.inexact)
):
rtol = 0
atol = 0
else:
rtol = self.rtol
atol = self.atol
diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol)
self.diff_total = len(diffs[0])
if self.diff_total == 0:
# Then we're done
return
if self.numdiffs < 0:
numdiffs = self.diff_total
else:
numdiffs = self.numdiffs
self.diff_pixels = [
(idx, (self.a[idx], self.b[idx]))
for idx in islice(zip(*diffs), 0, numdiffs)
]
self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))
def _report(self):
if self.diff_dimensions:
dimsa = " x ".join(str(d) for d in reversed(self.diff_dimensions[0]))
dimsb = " x ".join(str(d) for d in reversed(self.diff_dimensions[1]))
self._writeln(" Data dimensions differ:")
self._writeln(f" a: {dimsa}")
self._writeln(f" b: {dimsb}")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_pixels:
return
for index, values in self.diff_pixels:
index = [x + 1 for x in reversed(index)]
self._writeln(f" Data differs at {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
" {} different pixels found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
class RawDataDiff(ImageDataDiff):
"""
`RawDataDiff` is just a special case of `ImageDataDiff` where the images
are one-dimensional, and the data is treated as a 1-dimensional array of
bytes instead of pixel values. This is used to compare the data of two
non-standard extension HDUs that were not recognized as containing image or
table data.
    `RawDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
`ImageDataDiff` objects. Though the "dimension" of each array is just an
integer representing the number of bytes in the data.
- ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
objects, but renamed to reflect the minor semantic difference that these
are raw bytes and not pixel values. Also the indices are integers
instead of tuples.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
"""
def __init__(self, a, b, numdiffs=10):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
"""
self.diff_dimensions = ()
self.diff_bytes = []
super().__init__(a, b, numdiffs=numdiffs)
def _diff(self):
super()._diff()
if self.diff_dimensions:
self.diff_dimensions = (
self.diff_dimensions[0][0],
self.diff_dimensions[1][0],
)
self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
del self.diff_pixels
def _report(self):
if self.diff_dimensions:
self._writeln(" Data sizes differ:")
self._writeln(f" a: {self.diff_dimensions[0]} bytes")
self._writeln(f" b: {self.diff_dimensions[1]} bytes")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_bytes:
return
for index, values in self.diff_bytes:
self._writeln(f" Data differs at byte {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
self._writeln(" ...")
self._writeln(
" {} different bytes found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
class TableDataDiff(_BaseDiff):
"""
Diff two table data arrays. It doesn't matter whether the data originally
came from a binary or ASCII table--the data should be passed in as a
recarray.
`TableDataDiff` objects have the following diff attributes:
- ``diff_column_count``: If the tables being compared have different
numbers of columns, this contains a 2-tuple of the column count in each
table. Even if the tables have different column counts, an attempt is
still made to compare any columns they have in common.
- ``diff_columns``: If either table contains columns unique to that table,
either in name or format, this contains a 2-tuple of lists. The first
element is a list of columns (these are full `Column` objects) that
    appear only in table a. The second element is a list of columns that
appear only in table b. This only lists columns with different column
definitions, and has nothing to do with the data in those columns.
- ``diff_column_names``: This is like ``diff_columns``, but lists only the
names of columns unique to either table, rather than the full `Column`
objects.
- ``diff_column_attributes``: Lists columns that are in both tables but
have different secondary attributes, such as TUNIT or TDISP. The format
is a list of 2-tuples: The first a tuple of the column name and the
attribute, the second a tuple of the different values.
- ``diff_values``: `TableDataDiff` compares the data in each table on a
column-by-column basis. If any different data is found, it is added to
this list. The format of this list is similar to the ``diff_pixels``
attribute on `ImageDataDiff` objects, though the "index" consists of a
(column_name, row) tuple. For example::
[('TARGET', 0), ('NGC1001', 'NGC1002')]
shows that the tables contain different values in the 0-th row of the
'TARGET' column.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
`TableDataDiff` objects also have a ``common_columns`` attribute that lists
the `Column` objects for columns that are identical in both tables, and a
``common_column_names`` attribute which contains a set of the names of
those columns.
"""
def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.ignore_fields = set(ignore_fields)
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.common_columns = []
self.common_column_names = set()
# self.diff_columns contains columns with different column definitions,
# but not different column data. Column data is only compared in
# columns that have the same definitions
self.diff_rows = ()
self.diff_column_count = ()
self.diff_columns = ()
# If two columns have the same name+format, but other attributes are
# different (such as TUNIT or such) they are listed here
self.diff_column_attributes = []
# Like self.diff_columns, but just contains a list of the column names
# unique to each table, and in the order they appear in the tables
self.diff_column_names = ()
self.diff_values = []
self.diff_ratio = 0
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
# Much of the code for comparing columns is similar to the code for
# comparing headers--consider refactoring
colsa = self.a.columns
colsb = self.b.columns
if len(colsa) != len(colsb):
self.diff_column_count = (len(colsa), len(colsb))
# Even if the number of columns are unequal, we still do comparison of
# any common columns
colsa = {c.name.lower(): c for c in colsa}
colsb = {c.name.lower(): c for c in colsb}
if "*" in self.ignore_fields:
# If all columns are to be ignored, ignore any further differences
# between the columns
return
# Keep the user's original ignore_fields list for reporting purposes,
# but internally use a case-insensitive version
ignore_fields = {f.lower() for f in self.ignore_fields}
# It might be nice if there were a cleaner way to do this, but for now
# it'll do
for fieldname in ignore_fields:
fieldname = fieldname.lower()
if fieldname in colsa:
del colsa[fieldname]
if fieldname in colsb:
del colsb[fieldname]
colsa_set = set(colsa.values())
colsb_set = set(colsb.values())
self.common_columns = sorted(
colsa_set.intersection(colsb_set), key=operator.attrgetter("name")
)
self.common_column_names = {col.name.lower() for col in self.common_columns}
left_only_columns = {
col.name.lower(): col for col in colsa_set.difference(colsb_set)
}
right_only_columns = {
col.name.lower(): col for col in colsb_set.difference(colsa_set)
}
if left_only_columns or right_only_columns:
self.diff_columns = (left_only_columns, right_only_columns)
self.diff_column_names = ([], [])
if left_only_columns:
for col in self.a.columns:
if col.name.lower() in left_only_columns:
self.diff_column_names[0].append(col.name)
if right_only_columns:
for col in self.b.columns:
if col.name.lower() in right_only_columns:
self.diff_column_names[1].append(col.name)
# If the tables have a different number of rows, we don't compare the
# columns right now.
# TODO: It might be nice to optionally compare the first n rows where n
# is the minimum of the row counts between the two tables.
if len(self.a) != len(self.b):
self.diff_rows = (len(self.a), len(self.b))
return
# If the tables contain no rows there's no data to compare, so we're
# done at this point. (See ticket #178)
if len(self.a) == len(self.b) == 0:
return
# Like in the old fitsdiff, compare tables on a column by column basis
# The difficulty here is that, while FITS column names are meant to be
# case-insensitive, Astropy still allows, for the sake of flexibility,
# two columns with the same name but different case. When columns are
        # accessed in FITS tables, a case-sensitive match is tried first, and failing
# that a case-insensitive match is made.
# It's conceivable that the same column could appear in both tables
# being compared, but with different case.
# Though it *may* lead to inconsistencies in these rare cases, this
# just assumes that there are no duplicated column names in either
# table, and that the column names can be treated case-insensitively.
for col in self.common_columns:
name_lower = col.name.lower()
if name_lower in ignore_fields:
continue
cola = colsa[name_lower]
colb = colsb[name_lower]
for attr, _ in _COL_ATTRS:
vala = getattr(cola, attr, None)
valb = getattr(colb, attr, None)
if diff_values(vala, valb):
self.diff_column_attributes.append(
((col.name.upper(), attr), (vala, valb))
)
arra = self.a[col.name]
arrb = self.b[col.name]
if np.issubdtype(arra.dtype, np.floating) and np.issubdtype(
arrb.dtype, np.floating
):
diffs = where_not_allclose(arra, arrb, rtol=self.rtol, atol=self.atol)
elif "P" in col.format:
diffs = (
[
idx
for idx in range(len(arra))
if not np.allclose(
arra[idx], arrb[idx], rtol=self.rtol, atol=self.atol
)
],
)
else:
diffs = np.where(arra != arrb)
self.diff_total += len(set(diffs[0]))
if self.numdiffs >= 0:
if len(self.diff_values) >= self.numdiffs:
# Don't save any more diff values
continue
# Add no more diff'd values than this
max_diffs = self.numdiffs - len(self.diff_values)
else:
max_diffs = len(diffs[0])
last_seen_idx = None
for idx in islice(diffs[0], 0, max_diffs):
if idx == last_seen_idx:
                    # Skip duplicate indices, which may occur when the column
# data contains multi-dimensional values; we're only
# interested in storing row-by-row differences
continue
last_seen_idx = idx
self.diff_values.append(((col.name, idx), (arra[idx], arrb[idx])))
total_values = len(self.a) * len(self.a.dtype.fields)
self.diff_ratio = float(self.diff_total) / float(total_values)
def _report(self):
if self.diff_column_count:
self._writeln(" Tables have different number of columns:")
self._writeln(f" a: {self.diff_column_count[0]}")
self._writeln(f" b: {self.diff_column_count[1]}")
if self.diff_column_names:
# Show columns with names unique to either table
for name in self.diff_column_names[0]:
format = self.diff_columns[0][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in a")
for name in self.diff_column_names[1]:
format = self.diff_columns[1][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in b")
col_attrs = dict(_COL_ATTRS)
# Now go through each table again and show columns with common
# names but other property differences...
for col_attr, vals in self.diff_column_attributes:
name, attr = col_attr
self._writeln(f" Column {name} has different {col_attrs[attr]}:")
report_diff_values(
vals[0],
vals[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_rows:
self._writeln(" Table rows differ:")
self._writeln(f" a: {self.diff_rows[0]}")
self._writeln(f" b: {self.diff_rows[1]}")
self._writeln(" No further data comparison performed.")
return
if not self.diff_values:
return
# Finally, let's go through and report column data differences:
for indx, values in self.diff_values:
self._writeln(" Column {} data differs in row {}:".format(*indx))
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_values and self.numdiffs < self.diff_total:
self._writeln(
f" ...{self.diff_total - self.numdiffs} additional difference(s) found."
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
" {} different table data element(s) found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
"""
Write a diff between two header keyword values or comments to the specified
file-like object.
"""
if keyword in diffs:
vals = diffs[keyword]
for idx, val in enumerate(vals):
if val is None:
continue
if idx == 0:
dup = ""
else:
dup = f"[{idx + 1}]"
fileobj.write(
fixed_width_indent(
f" Keyword {keyword:8}{dup} has different {attr}:\n",
ind,
)
)
report_diff_values(val[0], val[1], fileobj=fileobj, indent_width=ind + 1)
|
0f40e894732a8de671900e8304576c8829692189faa33370e3bc671fac8fd3f0 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import numbers
import operator
import re
import sys
import warnings
import weakref
from collections import OrderedDict
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import indent, isiterable, lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from .card import CARD_LENGTH, Card
from .util import NotifierMixin, _convert_array, _is_int, cmp, encode_ascii, pairwise
from .verify import VerifyError, VerifyWarning
__all__ = ["Column", "ColDefs", "Delayed"]
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {
"L": "i1",
"B": "u1",
"I": "i2",
"J": "i4",
"K": "i8",
"E": "f4",
"D": "f8",
"C": "c8",
"M": "c16",
"A": "a",
}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS["b1"] = "L"
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS["u2"] = "I"
NUMPY2FITS["u4"] = "J"
NUMPY2FITS["u8"] = "K"
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS["f2"] = "E"
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ["L", "B", "I", "J", "K", "D", "M", "A"]
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {"E": "D", "C": "M"}
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {"A": "a", "I": "i4", "J": "i8", "F": "f8", "E": "f8", "D": "f8"}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {"A": "", "I": "d", "J": "d", "F": "f", "E": "E", "D": "E"}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {
"A": (1, 0),
"I": (10, 0),
"J": (15, 0),
"E": (15, 7),
"F": (16, 7),
"D": (25, 17),
}
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT["F"] = re.compile(
r"(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}" r"(?P<precision>[0-9])+)+)|"
)
TDISP_RE_DICT["A"] = TDISP_RE_DICT["L"] = re.compile(
r"(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|"
)
TDISP_RE_DICT["I"] = TDISP_RE_DICT["B"] = TDISP_RE_DICT["O"] = TDISP_RE_DICT[
"Z"
] = re.compile(
r"(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)"
r"(?:\.{0,1}(?P<precision>[0-9]+))?))|"
)
TDISP_RE_DICT["E"] = TDISP_RE_DICT["G"] = TDISP_RE_DICT["D"] = re.compile(
r"(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\."
r"(?P<precision>[0-9]+))+)"
r"(?:E{0,1}(?P<exponential>[0-9]+)?)|"
)
TDISP_RE_DICT["EN"] = TDISP_RE_DICT["ES"] = re.compile(
r"(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}" r"(?P<precision>[0-9])+)+)"
)
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
# Can't predefine zero padding and space padding before hand without
# knowing the value being formatted, so grabbing precision and using that
# to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering Fortran format, exponent a multiple of three)
# ES: Float (scientific, same as EN but with a non-zero leading digit)
# E: Float, exponential notation
# Can't get exponential restriction to work without knowing value
# before hand, so just using width and precision, same with D, G, EN, and
# ES formats
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {
"I": "{{:{width}d}}",
"B": "{{:{width}b}}",
"O": "{{:{width}o}}",
"Z": "{{:{width}x}}",
"F": "{{:{width}.{precision}f}}",
"G": "{{:{width}.{precision}g}}",
}
TDISP_FMT_DICT["A"] = TDISP_FMT_DICT["L"] = "{{:>{width}}}"
TDISP_FMT_DICT["E"] = TDISP_FMT_DICT["D"] = TDISP_FMT_DICT["EN"] = TDISP_FMT_DICT[
"ES"
] = "{{:{width}.{precision}e}}"
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = (
"TTYPE",
"TFORM",
"TUNIT",
"TNULL",
"TSCAL",
"TZERO",
"TDISP",
"TBCOL",
"TDIM",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
)
KEYWORD_ATTRIBUTES = (
"name",
"format",
"unit",
"null",
"bscale",
"bzero",
"disp",
"start",
"dim",
"coord_type",
"coord_unit",
"coord_ref_point",
"coord_ref_value",
"coord_inc",
"time_ref_pos",
)
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(
r"(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])" r"(?P<option>[!-~]*)", re.I
)
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(
r"(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|"
r"(?:(?P<formatf>[FED])"
r"(?:(?P<widthf>[0-9]+)(?:\."
r"(?P<precision>[0-9]+))?)?)"
)
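# For illustration, how these two expressions pick a TFORM string apart:
#
#     TFORMAT_RE.match("10E").groupdict()
#     # -> {'repeat': '10', 'format': 'E', 'option': ''}
#     TFORMAT_ASCII_RE.match("F8.3").groupdict()
#     # -> {'format': None, 'width': None, 'formatf': 'F', 'widthf': '8', 'precision': '3'}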
TTYPE_RE = re.compile(r"[0-9a-zA-Z_]+")
"""
Regular expression for valid table column names. See FITS Standard v3.0 section
7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r"(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)")
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r"\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*")
# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = "---"
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
# This forces the data for the HDU to be read, which will replace
# the corresponding Delayed objects in the Tables Columns to be
# transformed into ndarrays. It will also return the value of the
# requested data element.
return self.hdu.data[key][self.field]
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ("P", "Q"):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == "P":
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ""
else:
repeat = str(self.repeat)
return f"{repeat}{self.format}{self.option}"
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can represent
    effectively any integer that will fit in a FITS column, whereas for
binary tables 'I' very explicitly refers to a 16-bit signed integer.
    Conversions between the two column formats can be performed through the
    ``from_column_format`` and ``from_recformat`` classmethods on these
    classes. Not all conversions are possible, however, and an unsupported
    conversion may raise a `ValueError`.
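    Examples
    --------
    A brief illustrative example (format strings chosen arbitrarily):
    >>> fmt = _AsciiColumnFormat('I10')
    >>> fmt.format, fmt.width, fmt.precision
    ('I', 10, 0)
    >>> _AsciiColumnFormat('F8.3').canonical
    'F8.3'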
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = _parse_ascii_tformat(format, strict)
# If no width has been specified, set the dtype here to default as well
if format == self.format:
self.recformat = ASCII2NUMPY[format]
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == "L":
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ("E", "F", "D"):
return f"{self.format}{self.width}.{self.precision}"
return f"{self.format}{self.width}"
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + "u1")
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return f"{self.repeat}X"
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (
r"(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])" r"(?:\((?P<max>\d*)\))?"
)
_format_code = "P"
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = "2i4"
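    # Illustrative note (values chosen arbitrarily): _FormatP.from_tform('PJ(100)')
    # yields an instance with .dtype == 'i4' and .max == '100', and its .tform
    # property round-trips back to 'PJ(100)'.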
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group("dtype") not in FITS2NUMPY:
raise VerifyError(f"Invalid column format: {format}")
repeat = m.group("repeat")
array_dtype = m.group("dtype")
max = m.group("max")
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = "" if self.repeat is None else self.repeat
max = "" if self.max is None else self.max
return f"{repeat}{self._format_code}{self.format}({max})"
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = "Q"
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = "2i8"
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
        # made more flexible in the future, for example, to support custom
# column attributes.
self._attr = "_" + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify("column_attribute_changed", obj, self._attr[1:], old_value, value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return f"{self.__class__.__name__}('{self._keyword}')"
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(
self,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
array=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Construct a `Column` by specifying attributes. All attributes
        except ``format`` are optional; see :ref:`astropy:column_creation`
and :ref:`astropy:creating_ascii_table` for more information regarding
``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
            format of the column. In the case where non-trivial ``bscale``
            and/or ``bzero`` arguments are given, the values in the array must
            be the *physical* values--that is, the values of the column as if the
scaling has already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
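        Examples
        --------
        A minimal illustrative sketch (the column name and values are
        arbitrary):
        >>> col = Column(name='flux', format='E', array=[1.0, 2.0, 3.0])
        >>> col.name, col.format
        ('flux', 'E')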
"""
if format is None:
raise ValueError("Must specify format to construct Column.")
        # any of the input arguments (except array) can be a Card or just
# a number/string
kwargs = {"ascii": ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ["The following keyword arguments to Column were invalid:"]
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError("\n".join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs["recformat"]
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
        # If the column data is not an ndarray, convert it to one; input
        # arrays can be plain lists or tuples rather than ndarrays. Object
        # arrays are not handled here because there is no guarantee that the
        # elements of an object array are consistent.
if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError(
f"Data is inconsistent with the format `{format}`."
)
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
def __repr__(self):
text = ""
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + " = " + repr(value) + "; "
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
        # This way each column's .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
        # Because ndarray objects are not handled by Python's garbage collector,
# the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
# previously assigned each col.array to field in a FITS_rec (as in
# the example a few paragraphs above) is still used, however now
# array.setter checks if a reference cycle will be created. And if
# so, instead of saving directly to the Column's __dict__, it creates
        # the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
        # If the Column's array is not a reference to an existing FITS_rec,
        # then it is just stored in self.__dict__; otherwise check the
        # _parent_fits_rec reference if it's still available.
if "array" in self.__dict__:
return self.__dict__["array"]
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if hasattr(base, "_coldefs") and isinstance(base._coldefs, ColDefs):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if "array" in self.__dict__:
del self.__dict__["array"]
return
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self.__dict__["array"] = array
@array.deleter
def array(self):
try:
del self.__dict__["array"]
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute("TTYPE")
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but will be discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
"It is strongly recommended that column names contain only "
"upper and lower-case ASCII letters, digits, or underscores "
"for maximum compatibility with other software "
"(got {!r}).".format(name),
VerifyWarning,
)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if not isinstance(name, str) or len(str(Card("TTYPE", name))) != CARD_LENGTH:
raise AssertionError(
"Column name must be a string able to fit in a single "
"FITS card--typically this means a maximum of 68 "
"characters, though it may be fewer if the string "
"contains special characters like quotes."
)
@ColumnAttribute("TCTYP")
def coord_type(col, coord_type):
if coord_type is None:
return
if not isinstance(coord_type, str) or len(coord_type) > 8:
raise AssertionError(
"Coordinate/axis type must be a string of atmost 8 characters."
)
@ColumnAttribute("TCUNI")
def coord_unit(col, coord_unit):
if coord_unit is not None and not isinstance(coord_unit, str):
raise AssertionError("Coordinate/axis unit must be a string.")
@ColumnAttribute("TCRPX")
def coord_ref_point(col, coord_ref_point):
if coord_ref_point is not None and not isinstance(
coord_ref_point, numbers.Real
):
raise AssertionError(
"Pixel coordinate of the reference point must be real floating type."
)
@ColumnAttribute("TCRVL")
def coord_ref_value(col, coord_ref_value):
if coord_ref_value is not None and not isinstance(
coord_ref_value, numbers.Real
):
raise AssertionError(
"Coordinate value at reference point must be real floating type."
)
@ColumnAttribute("TCDLT")
def coord_inc(col, coord_inc):
if coord_inc is not None and not isinstance(coord_inc, numbers.Real):
raise AssertionError("Coordinate increment must be real floating type.")
@ColumnAttribute("TRPOS")
def time_ref_pos(col, time_ref_pos):
if time_ref_pos is not None and not isinstance(time_ref_pos, str):
raise AssertionError("Time reference position must be a string.")
format = ColumnAttribute("TFORM")
unit = ColumnAttribute("TUNIT")
null = ColumnAttribute("TNULL")
bscale = ColumnAttribute("TSCAL")
bzero = ColumnAttribute("TZERO")
disp = ColumnAttribute("TDISP")
start = ColumnAttribute("TBCOL")
dim = ColumnAttribute("TDIM")
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format="I") # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f"Illegal format `{format}`.")
return format, recformat
@classmethod
def _verify_keywords(
cls,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Given the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f"Column format option (TFORMn) failed verification: {err!s} "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
except AttributeError as err:
msg = (
"Column format option (TFORMn) must be a string with a valid "
f"FITS table format (got {format!s}: {err!s}). "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [
("name", name),
("unit", unit),
("bscale", bscale),
("bzero", bzero),
]:
if v is not None and v != "":
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != "":
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null)
)
else:
tnull_formats = ("B", "I", "J", "K")
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
"Column null option (TNULLn) must be an integer for "
"binary table columns (got {!r}). The invalid value "
"will be ignored for the purpose of formatting "
"the data in this column.".format(null)
)
elif not (
format.format in tnull_formats
or (
format.format in ("P", "Q") and format.p_format in tnull_formats
)
):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
"Column null option (TNULLn) is invalid for binary "
"table columns of type {!r} (got {!r}). The invalid "
"value will be ignored for the purpose of formatting "
"the data in this column.".format(format, null)
)
if msg is None:
valid["null"] = null
else:
invalid["null"] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != "":
msg = None
if not isinstance(disp, str):
msg = (
"Column disp option (TDISPn) must be a string (got "
f"{disp!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
elif isinstance(format, _AsciiColumnFormat) and disp[0].upper() == "L":
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column."
)
if msg is None:
try:
_parse_tdisp_format(disp)
valid["disp"] = disp
except VerifyError as err:
msg = (
"Column disp option (TDISPn) failed verification: "
f"{err!s} The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
invalid["disp"] = (disp, msg)
else:
invalid["disp"] = (disp, msg)
# Validate the start option
if start is not None and start != "":
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
"Column start option (TBCOLn) is not allowed for binary "
"table columns (got {!r}). The invalid keyword will be "
"ignored for the purpose of formatting the data in this "
"column.".format(start)
)
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
"Column start option (TBCOLn) must be a positive integer "
"(got {!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column.".format(start)
)
if msg is None:
valid["start"] = start
else:
invalid["start"] = (start, msg)
# Process TDIMn options
        # ASCII table columns can't have a TDIMn keyword associated with them;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != "":
msg = None
dims_tuple = tuple()
            # NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
"Column dim option (TDIMn) is not allowed for ASCII table "
"columns (got {!r}). The invalid keyword will be ignored "
"for the purpose of formatting this column.".format(dim)
)
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column."
)
if dims_tuple:
if isinstance(recformat, _FormatP):
# TDIMs have different meaning for VLA format,
# no warning should be thrown
msg = None
elif reduce(operator.mul, dims_tuple) > format.repeat:
msg = (
"The repeat count of the column format {!r} for column {!r} "
"is fewer than the number of elements per the TDIM "
"argument {!r}. The invalid TDIMn value will be ignored "
"for the purpose of formatting this column.".format(
                            format, name, dim
)
)
if msg is None:
valid["dim"] = dims_tuple
else:
invalid["dim"] = (dim, msg)
if coord_type is not None and coord_type != "":
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type)
)
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"of atmost 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type)
)
if msg is None:
valid["coord_type"] = coord_type
else:
invalid["coord_type"] = (coord_type, msg)
if coord_unit is not None and coord_unit != "":
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit)
)
if msg is None:
valid["coord_unit"] = coord_unit
else:
invalid["coord_unit"] = (coord_unit, msg)
for k, v in [
("coord_ref_point", coord_ref_point),
("coord_ref_value", coord_ref_value),
("coord_inc", coord_inc),
]:
if v is not None and v != "":
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got"
" {!r}). The invalid value will be ignored for the purpose of"
" formatting the data in this column.".format(
k, ATTRIBUTE_TO_KEYWORD[k], v
)
)
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != "":
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos)
)
if msg is None:
valid["time_ref_pos"] = time_ref_pos
else:
invalid["time_ref_pos"] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but lean toward binary table
where ambiguous) create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
            # We're just given a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format, _AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
"Columns cannot have both a start (TCOLn) and dim "
"(TDIMn) option, since the former is only applies to "
"ASCII tables, and the latter is only valid for binary "
"tables."
)
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
# For whatever reason our guess was wrong (for example if we got
            # just 'F', which is not a valid binary format but is an ASCII
            # format code, albeit with the width/precision omitted)
guess_format = (
_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat
)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims and format.format not in "PQ":
shape = dims[:-1] if "A" in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if "P" in format or "Q" in format:
return array
elif "A" in format:
if array.dtype.char in "SU":
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif "L" in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype("bool"):
return np.where(array == np.False_, ord("F"), ord("T"))
else:
return np.where(array == 0, ord("F"), ord("T"))
elif "X" in format:
return _convert_array(array, np.dtype("uint8"))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
                # to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {
2: np.uint16(2**15),
4: np.uint32(2**31),
8: np.uint64(2**63),
}
if (
array.dtype.kind == "u"
and array.dtype.itemsize in bzeros
and self.bscale in (1, None, "")
and self.bzero == bzeros[array.dtype.itemsize]
):
                    # The array is uint, bscale is 1 (or unset), and bzero is
                    # the appropriate value for a pseudo-unsigned integer of the
                    # input dtype, so go ahead and treat the column as
                    # pseudo-unsigned.
numpy_format = numpy_format.replace("i", "u")
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = "\x00"
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if hasattr(input, "_columns_type") and issubclass(input._columns_type, ColDefs):
klass = input._columns_type
elif hasattr(input, "_col_format_cls") and issubclass(
input._col_format_cls, _AsciiColumnFormat
):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray`
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
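        Examples
        --------
        A minimal illustrative sketch (column names and formats are arbitrary):
        >>> c1 = Column(name='a', format='J', array=[1, 2])
        >>> c2 = Column(name='b', format='E', array=[1.5, 2.5])
        >>> cols = ColDefs([c1, c2])
        >>> cols.names
        ['a', 'b']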
"""
from .fitsrec import FITS_rec
from .hdu.table import _TableBaseHDU
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (
isinstance(input, FITS_rec)
and hasattr(input, "_coldefs")
and input._coldefs
):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError(
"Input to ColDefs must be a table HDU, a list "
"of Columns, or a record/field array."
)
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError(f"Element {idx} in the ColDefs input is not a Column.")
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
dim = array.dtype[idx].shape[::-1]
if dim and (len(dim) > 0 or "A" in format):
if "A" in format:
# should take into account multidimensional items in the column
dimel = int(re.findall("[0-9]+", str(ftype.subdtype[0]))[0])
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (dimel,) + dim
dim = "(" + ",".join(str(d) for d in dim) + ")"
else:
dim = None
# Check for unsigned ints.
bzero = None
if ftype.base.kind == "u":
if "I" in format:
bzero = np.uint16(2**15)
elif "J" in format:
bzero = np.uint32(2**31)
elif "K" in format:
bzero = np.uint64(2**63)
c = Column(
name=cname,
format=format,
array=array.view(np.ndarray)[cname],
bzero=bzero,
dim=dim,
)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr["TFIELDS"]
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
col_keywords = [{} for i in range(nfields)]
for keyword in hdr:
key = TDEF_RE.match(keyword)
try:
label = key.group("label")
except Exception:
continue # skip if there is no match
if label in KEYWORD_NAMES:
col = int(key.group("num"))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[label]
value = hdr[keyword]
if attr == "format":
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
f"Invalid keyword for column {idx + 1}: {val[1]}", VerifyWarning
)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs["recformat"]
if "dim" in valid_kwargs:
valid_kwargs["dim"] = kwargs["dim"]
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]["array"] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
        # Add the table HDU as a listener for changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(column.format)
# Handle a few special cases of column format options that are not
        # compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if new_column.disp is not None and new_column.disp.upper().startswith("L"):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
Implements for example self.units, self.formats, etc.
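        For example (illustrative; the column definition is arbitrary):
        >>> cols = ColDefs([Column(name='a', format='J', unit='s')])
        >>> cols.units
        ['s']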
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == "s":
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else "")
return attr
raise AttributeError(name)
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim and format_.format not in "PQ":
# Note: VLA array descriptors should not be reshaped
# as they are always of shape (2,)
if format_.format == "A":
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({"names": self.names, "formats": formats, "offsets": offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = "ColDefs("
if hasattr(self, "columns") and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += "\n "
rep += "\n ".join([repr(c) for c in self.columns])
rep += "\n"
rep += ")"
return rep
def __add__(self, other, option="left"):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError("Wrong type of input.")
if option == "left":
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, "right")
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _update_column_attribute_changed(self, column, attr, old_value, new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == "name":
del self.names
elif attr == "format":
del self.formats
self._notify(
"column_attribute_changed", column, idx, attr, old_value, new_value
)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
raise AssertionError
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify("column_added", self, column)
return self
def del_col(self, col_name):
"""
Delete (the definition of) one `Column`.
        Parameters
        ----------
        col_name : str or int
            The column's name or index
"""
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify("column_removed", self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError(f"New name {new_name} already exists.")
else:
self.change_attrib(col_name, "name", new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, "unit", new_unit)
def info(self, attrib="all", output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file-like, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ["all", ""]:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(",")
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == "s":
                    lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write(
f"'{attr}' is not an attribute of the column definitions.\n"
)
continue
output.write(f"{attr}:\n")
output.write(f" {getattr(self, attr + 's')}\n")
else:
ret[attr] = getattr(self, attr + "s")
if not output:
return ret
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = " "
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = "S" + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
        # Each width is the span of a field *including* any space between
        # fields; this is so that we can map the fields to string records in a
        # Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ["a" + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype="a"):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == "a":
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(f"Inconsistent input data array: {input}")
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a, dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
To make sure the new item has consistent data type to avoid
misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == "a":
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
nelem = value.shape
len_value = np.prod(nelem)
self.max = max(self.max, len_value)
def tolist(self):
return [list(item) for item in super().tolist()]
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
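    For example (illustrative names):
    >>> _get_index(['ABC', 'def'], 'abc')
    0
    >>> _get_index(['ABC', 'def'], 'DEF')
    1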
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
            # try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
        input ``uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
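    Examples
    --------
    A small illustrative sketch (the bit pattern is arbitrary):
    >>> inp = np.array([[0b10100000]], dtype=np.uint8)
    >>> out = np.zeros((1, 3), dtype=bool)
    >>> _unwrapx(inp, out, 3)
    >>> out.tolist()
    [[True, False, True]]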
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype="uint8")
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
    Wrap the X format column Boolean array into a ``uint8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
        output ``uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
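    Examples
    --------
    A small illustrative sketch (the inverse of the ``_unwrapx`` example):
    >>> inp = np.array([[True, False, True]])
    >>> out = np.zeros((1, 1), dtype=np.uint8)
    >>> _wrapx(inp, out, 3)
    >>> int(out[0, 0])
    160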
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
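    Examples
    --------
    A minimal illustrative sketch (the TFORM string and values are arbitrary):
    >>> fmt = _FormatP.from_tform('PJ(3)')
    >>> descr = np.zeros((2, 2), dtype='int32')
    >>> data = _makep([[1, 2], [3]], descr, fmt)
    >>> descr.tolist()
    [[2, 0], [1, 8]]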
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == "a":
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == "a":
rowval = " " * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == "a":
data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
nelem = data_output[idx].shape
descr_output[idx, 0] = np.prod(nelem)
descr_output[idx, 1] = _offset
_offset += descr_output[idx, 0] * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
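    For example (an illustrative TFORM string):
    >>> _parse_tformat('100E')
    (100, 'E', '')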
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
        # TODO: Maybe catch this error and use a default type (bytes, maybe?)
        # for unrecognized column types. As long as we can determine the
        # correct byte width somehow...
raise VerifyError(f"Format {tform!r} is not recognized.")
if repeat == "":
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f"Format {tform!r} is not recognized.")
# Be flexible on case
format = match.group("format")
if format is None:
# Floating point format
format = match.group("formatf").upper()
width = match.group("widthf")
precision = match.group("precision")
if width is None or precision is None:
if strict:
                raise VerifyError(
                    f"Format {tform!r} is not unambiguously an ASCII table format."
                )
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group("width")
if width is None:
if strict:
                raise VerifyError(
                    f"Format {tform!r} is not unambiguously an ASCII table format."
                )
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = (
"Format {!r} is not valid--field width and decimal precision "
"must be integers."
)
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError(
f"Format {tform!r} not valid--field width must be a positive integeter."
)
if precision >= width:
raise VerifyError(
"Format {!r} not valid--the number of decimal digits "
"must be less than the format's total "
"width {}.".format(tform, width)
)
return format, width, precision
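# Illustrative examples (editor's sketch): integer and character formats carry
# no precision, while floating-point formats do.
#
#     >>> _parse_ascii_tformat("I10")
#     ('I', 10, 0)
#     >>> _parse_ascii_tformat("E15.7")
#     ('E', 15, 7)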
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
    the ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group("dims")
return tuple(int(d.strip()) for d in dims.split(","))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return tuple()
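# Illustrative example (editor's sketch, assuming TDIM_RE accepts the usual
# "(d1,d2,...)" syntax): the dimensions are reversed to match Numpy's
# C-ordered shape convention.
#
#     >>> _parse_tdim("(100,200)")
#     (200, 100)
#     >>> _parse_tdim("")
#     ()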
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == "a" and f2[0] == "a":
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == "A":
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == "A" and option != "":
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ""
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == "X":
output_format = _FormatX(repeat)
elif dtype == "P":
output_format = _FormatP.from_tform(format)
elif dtype == "Q":
output_format = _FormatQ.from_tform(format)
elif dtype == "F":
output_format = "f8"
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == "U" or (
dtype.subdtype is not None and dtype.subdtype[0].char == "U"
):
# Unicode dtype--itemsize is 4 times actual ASCII character length,
        # which is what matters for FITS column formats
# Use dtype.base and dtype.subdtype --dtype for multi-dimensional items
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype="i8").prod()
if nel > 1:
repeat = nel
if kind == "a":
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + "A"
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ""
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
    a dtype (e.g. 'float32') into one of the two-character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ("U", "S"):
recformat = kind = "a"
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
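# Illustrative examples (editor's sketch): numeric dtypes map to kind+itemsize,
# while string/unicode dtypes collapse to the legacy 'a' code.
#
#     >>> _dtype_to_recformat("float32")
#     ('f4', 'f', dtype('float32'))
#     >>> _dtype_to_recformat("U10")[:2]
#     ('a', 'a')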
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == "a":
return "A" + str(itemsize)
elif NUMPY2FITS.get(recformat) == "L":
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return "A1"
elif kind == "i":
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS["I"][0])
return "I" + str(width)
elif kind == "f":
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = "D"
else:
format = "E"
width = ".".join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
        # For integers, if the width <= 4 we can safely use 16-bit ints for all
        # values; if the width >= 10 we may need to accommodate 64-bit ints
        # (for the non-standard J format code, always force 64-bit).
if format == "I":
if width <= 4:
recformat = "i2"
elif width > 9:
recformat = "i8"
elif format == "A":
recformat += str(width)
return recformat
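# Illustrative examples (editor's sketch, assuming ASCII2NUMPY maps 'A' to the
# legacy 'a' character code): narrow integer columns are downgraded to 16-bit
# ints per the CFITSIO rule described above.
#
#     >>> _convert_ascii_format("I4")
#     'i2'
#     >>> _convert_ascii_format("A10")
#     'a10'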
def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
    which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = (
tdisp[0]
if tdisp[0] != "E" or (len(tdisp) > 1 and tdisp[1] not in "NS")
else tdisp[:2]
)
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f"Format {tdisp} is not recognized.")
match = tdisp_re.match(tdisp.strip())
if not match or match.group("formatc") is None:
raise VerifyError(f"Format {tdisp} is not recognized.")
formatc = match.group("formatc")
width = match.group("width")
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ("I", "B", "O", "Z", "F", "E", "G", "D"):
precision = match.group("precision")
if precision is None:
precision = 1
if tdisp[0] in ("E", "D", "G") and tdisp[1] not in ("N", "S"):
exponential = match.group("exponential")
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
See the format_type definitions above the TDISP_FMT_DICT. If codes is
changed to take advantage of the exponential specification, will need to
add it as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f"Format {format_type} is not recognized.")
def python_to_tdisp(format_string, logical_dtype=False):
"""
    Turn the Python format string into a TDISP FITS-compliant format string. Not
    all formats convert; these will cause a warning and return None.
Parameters
----------
format_string : str
        Python format string to be translated into a TDISPn FITS Header keyword.
logical_dtype : bool
        True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
    tdisp_string : str
        The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {
"a": "A",
"s": "A",
"d": "I",
"b": "B",
"o": "O",
"x": "Z",
"X": "Z",
"f": "F",
"F": "F",
"g": "G",
"G": "G",
"e": "E",
"E": "E",
}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == "{" and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip("}")
elif format_string[0] == "%":
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = "", ""
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == ">" and fmt_str[1] != "0":
ftype = fmt_to_tdisp["a"]
width = fmt_str[1:]
elif fmt_str[-1] == "s" and fmt_str != "s":
ftype = fmt_to_tdisp["a"]
width = fmt_str[:-1].lstrip("0")
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != "0":
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if "." in fmt_str:
width, precision = fmt_str.split(".")
sep = "."
if width == "":
ascii_key = ftype if ftype != "G" else "F"
width = str(
int(precision)
+ (
ASCII_DEFAULT_WIDTHS[ascii_key][0]
- ASCII_DEFAULT_WIDTHS[ascii_key][1]
)
)
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn(
"Format {} cannot be mapped to the accepted "
"TDISPn keyword values. Format will not be "
"moved into TDISPn keyword.".format(format_string),
AstropyUserWarning,
)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = "L"
return ftype + width + sep + precision
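# Illustrative examples (editor's sketch): both new-style and printf-style
# Python format strings translate to TDISPn values.
#
#     >>> python_to_tdisp("{:8.3f}")
#     'F8.3'
#     >>> python_to_tdisp("%10d")
#     'I10'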
44b5998d438a47640cdcedb02cbbf4e6cf81726ee157f8b8efa26595ee457794 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import itertools
import numbers
import os
import re
import warnings
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
from ._utils import parse_header
from .card import KEYWORD_LENGTH, UNDEFINED, Card, _pad
from .file import _File
from .util import (
decode_ascii,
encode_ascii,
fileobj_closed,
fileobj_is_binary,
path_like,
)
BLOCK_SIZE = 2880 # the FITS block size
# This regular expression can match a *valid* END card which just consists of
# the string 'END' followed by all spaces, or an *invalid* end card which
# consists of END, followed by any character that is *not* a valid character
# for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which
# starts with 'END' but is not 'END'), followed by any arbitrary bytes. An
# invalid end card may also consist of just 'END' with no trailing bytes.
HEADER_END_RE = re.compile(
encode_ascii(r"(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])")
)
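# Illustrative check (editor's sketch): a canonical END card matches the
# "valid" group, stray bytes after END match "invalid", and a longer keyword
# such as ENDER does not match at all.
#
#     >>> HEADER_END_RE.match(encode_ascii("END" + " " * 77)).group("valid") is not None
#     True
#     >>> HEADER_END_RE.match(encode_ascii("END!")).group("invalid")
#     b'END!'
#     >>> HEADER_END_RE.match(encode_ascii("ENDER")) is None
#     True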
# According to the FITS standard the only characters that may appear in a
# header record are the restricted ASCII chars from 0x20 through 0x7E.
VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F)))
END_CARD = "END" + " " * 77
__doctest_skip__ = [
"Header",
"Header.comments",
"Header.fromtextfile",
"Header.totextfile",
"Header.set",
"Header.update",
]
class Header:
"""
FITS header class. This class exposes both a dict-like interface and a
list-like interface to FITS headers.
The header may be indexed by keyword and, like a dict, the associated value
will be returned. When the header contains cards with duplicate keywords,
only the value of the first card with the given keyword will be returned.
It is also possible to use a 2-tuple as the index in the form (keyword,
n)--this returns the n-th value with that keyword, in the case where there
are duplicate keywords.
For example::
>>> header['NAXIS']
0
>>> header[('FOO', 1)] # Return the value of the second FOO keyword
'foo'
The header may also be indexed by card number::
>>> header[0] # Return the value of the first card in the header
'T'
Commentary keywords such as HISTORY and COMMENT are special cases: When
indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all
the HISTORY/COMMENT values is returned::
>>> header['HISTORY']
This is the first history entry in this header.
This is the second history entry in this header.
...
See the Astropy documentation for more details on working with headers.
Notes
-----
Although FITS keywords must be exclusively upper case, retrieving an item
in a `Header` object is case insensitive.
"""
def __init__(self, cards=[], copy=False):
"""
Construct a `Header` from an iterable and/or text file.
Parameters
----------
cards : list of `Card`, optional
The cards to initialize the header with. Also allowed are other
`Header` (or `dict`-like) objects.
.. versionchanged:: 1.2
Allowed ``cards`` to be a `dict`-like object.
copy : bool, optional
If ``True`` copies the ``cards`` if they were another `Header`
instance.
Default is ``False``.
.. versionadded:: 1.3
"""
self.clear()
if isinstance(cards, Header):
if copy:
cards = cards.copy()
cards = cards.cards
elif isinstance(cards, dict):
cards = cards.items()
for card in cards:
self.append(card, end=True)
self._modified = False
def __len__(self):
return len(self._cards)
def __iter__(self):
for card in self._cards:
yield card.keyword
def __contains__(self, keyword):
if keyword in self._keyword_indices or keyword in self._rvkc_indices:
# For the most common case (single, standard form keyword lookup)
# this will work and is an O(1) check. If it fails that doesn't
# guarantee absence, just that we have to perform the full set of
# checks in self._cardindex
return True
try:
self._cardindex(keyword)
except (KeyError, IndexError):
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
return self.__class__([copy.copy(c) for c in self._cards[key]])
elif self._haswildcard(key):
return self.__class__(
[copy.copy(self._cards[idx]) for idx in self._wildcardmatch(key)]
)
elif isinstance(key, str):
key = key.strip()
if key.upper() in Card._commentary_keywords:
key = key.upper()
# Special case for commentary cards
return _HeaderCommentaryCards(self, key)
if isinstance(key, tuple):
keyword = key[0]
else:
keyword = key
card = self._cards[self._cardindex(key)]
if card.field_specifier is not None and keyword == card.rawkeyword:
# This is RVKC; if only the top-level keyword was specified return
# the raw value, not the parsed out float value
return card.rawvalue
value = card.value
if value == UNDEFINED:
return None
return value
def __setitem__(self, key, value):
if self._set_slice(key, value, self):
return
if isinstance(value, tuple):
if len(value) > 2:
raise ValueError(
"A Header item may be set with either a scalar value, "
"a 1-tuple containing a scalar value, or a 2-tuple "
"containing a scalar value and comment string."
)
if len(value) == 1:
value, comment = value[0], None
if value is None:
value = UNDEFINED
elif len(value) == 2:
value, comment = value
if value is None:
value = UNDEFINED
if comment is None:
comment = ""
else:
comment = None
card = None
if isinstance(key, numbers.Integral):
card = self._cards[key]
elif isinstance(key, tuple):
card = self._cards[self._cardindex(key)]
if value is None:
value = UNDEFINED
if card:
card.value = value
if comment is not None:
card.comment = comment
if card._modified:
self._modified = True
else:
# If we get an IndexError that should be raised; we don't allow
# assignment to non-existing indices
self._update((key, value, comment))
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# This is very inefficient but it's not a commonly used feature.
# If someone out there complains that they make heavy use of slice
# deletions and it's too slow, well, we can worry about it then
# [the solution is not too complicated--it would be wait 'til all
# the cards are deleted before updating _keyword_indices rather
# than updating it once for each card that gets deleted]
if isinstance(key, slice):
indices = range(*key.indices(len(self)))
# If the slice step is backwards we want to reverse it, because
# it will be reversed in a few lines...
if key.step and key.step < 0:
indices = reversed(indices)
else:
indices = self._wildcardmatch(key)
for idx in reversed(indices):
del self[idx]
return
elif isinstance(key, str):
# delete ALL cards with the same keyword name
key = Card.normalize_keyword(key)
indices = self._keyword_indices
if key not in self._keyword_indices:
indices = self._rvkc_indices
if key not in indices:
                # If the keyword is not present, raise a KeyError.
                # To delete a keyword without caring whether it is present,
                # use Header.remove(keyword, ignore_missing=True).
raise KeyError(f"Keyword '{key}' not found.")
for idx in reversed(indices[key]):
# Have to copy the indices list since it will be modified below
del self[idx]
return
idx = self._cardindex(key)
card = self._cards[idx]
keyword = card.keyword
del self._cards[idx]
keyword = Card.normalize_keyword(keyword)
indices = self._keyword_indices[keyword]
indices.remove(idx)
if not indices:
del self._keyword_indices[keyword]
# Also update RVKC indices if necessary :/
if card.field_specifier is not None:
indices = self._rvkc_indices[card.rawkeyword]
indices.remove(idx)
if not indices:
del self._rvkc_indices[card.rawkeyword]
# We also need to update all other indices
self._updateindices(idx, increment=False)
self._modified = True
def __repr__(self):
return self.tostring(sep="\n", endcard=False, padding=False)
def __str__(self):
return self.tostring()
def __eq__(self, other):
"""
Two Headers are equal only if they have the exact same string
representation.
"""
return str(self) == str(other)
def __add__(self, other):
temp = self.copy(strip=False)
temp.extend(other)
return temp
def __iadd__(self, other):
self.extend(other)
return self
def _ipython_key_completions_(self):
return self.__iter__()
@property
def cards(self):
"""
The underlying physical cards that make up this Header; it can be
looked at, but it should not be modified directly.
"""
return _CardAccessor(self)
@property
def comments(self):
"""
View the comments associated with each keyword, if any.
For example, to see the comment on the NAXIS keyword:
>>> header.comments['NAXIS']
number of data axes
Comments can also be updated through this interface:
>>> header.comments['NAXIS'] = 'Number of data axes'
"""
return _HeaderComments(self)
@property
def _modified(self):
"""
Whether or not the header has been modified; this is a property so that
it can also check each card for modifications--cards may have been
modified directly without the header containing it otherwise knowing.
"""
modified_cards = any(c._modified for c in self._cards)
if modified_cards:
# If any cards were modified then by definition the header was
# modified
self.__dict__["_modified"] = True
return self.__dict__["_modified"]
@_modified.setter
def _modified(self, val):
self.__dict__["_modified"] = val
@classmethod
def fromstring(cls, data, sep=""):
"""
Creates an HDU header from a byte string containing the entire header
data.
Parameters
----------
data : str or bytes
String or bytes containing the entire header. In the case of bytes
they will be decoded using latin-1 (only plain ASCII characters are
allowed in FITS headers but latin-1 allows us to retain any invalid
bytes that might appear in malformatted FITS files).
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file). In general this is only used in cases where a header was
printed as text (e.g. with newlines after each card) and you want
to create a new `Header` from it by copy/pasting.
Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'SIMPLE': True})
>>> Header.fromstring(hdr.tostring()) == hdr
True
If you want to create a `Header` from printed text it's not necessary
to have the exact binary structure as it would appear in a FITS file,
with the full 80 byte card length. Rather, each "card" can end in a
newline and does not have to be padded out to a full card length as
long as it "looks like" a FITS header:
>>> hdr = Header.fromstring(\"\"\"\\
... SIMPLE = T / conforms to FITS standard
... BITPIX = 8 / array data type
... NAXIS = 0 / number of array dimensions
... EXTEND = T
... \"\"\", sep='\\n')
>>> hdr['SIMPLE']
True
>>> hdr['BITPIX']
8
>>> len(hdr)
4
Returns
-------
`Header`
A new `Header` instance.
"""
cards = []
# If the card separator contains characters that may validly appear in
# a card, the only way to unambiguously distinguish between cards is to
# require that they be Card.length long. However, if the separator
# contains non-valid characters (namely \n) the cards may be split
# immediately at the separator
require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS)
if isinstance(data, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place--accepting it here still gives us the
# opportunity to display warnings later during validation
CONTINUE = b"CONTINUE"
END = b"END"
end_card = END_CARD.encode("ascii")
sep = sep.encode("latin1")
empty = b""
else:
CONTINUE = "CONTINUE"
END = "END"
end_card = END_CARD
empty = ""
# Split the header into individual cards
idx = 0
image = []
while idx < len(data):
if require_full_cardlength:
end_idx = idx + Card.length
else:
try:
end_idx = data.index(sep, idx)
except ValueError:
end_idx = len(data)
next_image = data[idx:end_idx]
idx = end_idx + len(sep)
if image:
if next_image[:8] == CONTINUE:
image.append(next_image)
continue
cards.append(Card.fromstring(empty.join(image)))
if require_full_cardlength:
if next_image == end_card:
image = []
break
else:
if next_image.split(sep)[0].rstrip() == END:
image = []
break
image = [next_image]
# Add the last image that was found before the end, if any
if image:
cards.append(Card.fromstring(empty.join(image)))
return cls._fromcards(cards)
@classmethod
def fromfile(cls, fileobj, sep="", endcard=True, padding=True):
"""
Similar to :meth:`Header.fromstring`, but reads the header string from
a given file-like object or filename.
Parameters
----------
fileobj : str, file-like
A filename or an open file-like object from which a FITS header is
to be read. For open file handles the file pointer must be at the
beginning of the header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
endcard : bool, optional
If True (the default) the header must end with an END card in order
to be considered valid. If an END card is not found an
`OSError` is raised.
padding : bool, optional
If True (the default) the header will be required to be padded out
to a multiple of 2880, the FITS header block size. Otherwise any
padding, or lack thereof, is ignored.
Returns
-------
`Header`
A new `Header` instance.
"""
close_file = False
if isinstance(fileobj, path_like):
# If sep is non-empty we are trying to read a header printed to a
# text file, so open in text mode by default to support newline
# handling; if a binary-mode file object is passed in, the user is
# then on their own w.r.t. newline handling.
#
# Otherwise assume we are reading from an actual FITS file and open
# in binary mode.
fileobj = os.path.expanduser(fileobj)
if sep:
fileobj = open(fileobj, encoding="latin1")
else:
fileobj = open(fileobj, "rb")
close_file = True
try:
is_binary = fileobj_is_binary(fileobj)
def block_iter(nbytes):
while True:
data = fileobj.read(nbytes)
if data:
yield data
else:
break
return cls._from_blocks(block_iter, is_binary, sep, endcard, padding)[1]
finally:
if close_file:
fileobj.close()
@classmethod
def _fromcards(cls, cards):
header = cls()
for idx, card in enumerate(cards):
header._cards.append(card)
keyword = Card.normalize_keyword(card.keyword)
header._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
header._rvkc_indices[card.rawkeyword].append(idx)
header._modified = False
return header
@classmethod
def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding):
"""
The meat of `Header.fromfile`; in a separate method so that
`Header.fromfile` itself is just responsible for wrapping file
handling. Also used by `_BaseHDU.fromstring`.
``block_iter`` should be a callable which, given a block size n
(typically 2880 bytes as used by the FITS standard) returns an iterator
of byte strings of that block size.
``is_binary`` specifies whether the returned blocks are bytes or text
Returns both the entire header *string*, and the `Header` object
returned by Header.fromstring on that string.
"""
actual_block_size = _block_size(sep)
clen = Card.length + len(sep)
blocks = block_iter(actual_block_size)
# Read the first header block.
try:
block = next(blocks)
except StopIteration:
raise EOFError()
if not is_binary:
# TODO: There needs to be error handling at *this* level for
# non-ASCII characters; maybe at this stage decoding latin-1 might
# be safer
block = encode_ascii(block)
read_blocks = []
is_eof = False
end_found = False
# continue reading header blocks until END card or EOF is reached
while True:
# find the END card
end_found, block = cls._find_end_card(block, clen)
read_blocks.append(decode_ascii(block))
if end_found:
break
try:
block = next(blocks)
except StopIteration:
is_eof = True
break
if not block:
is_eof = True
break
if not is_binary:
block = encode_ascii(block)
header_str = "".join(read_blocks)
_check_padding(header_str, actual_block_size, is_eof, check_block_size=padding)
if not end_found and is_eof and endcard:
# TODO: Pass this error to validation framework as an ERROR,
# rather than raising an exception
raise OSError("Header missing END card.")
return header_str, cls.fromstring(header_str, sep=sep)
@classmethod
def _find_end_card(cls, block, card_len):
"""
Utility method to search a header block for the END card and handle
invalid END cards.
        This method can also return a modified copy of the input header block
in case an invalid end card needs to be sanitized.
"""
for mo in HEADER_END_RE.finditer(block):
# Ensure the END card was found, and it started on the
# boundary of a new card (see ticket #142)
if mo.start() % card_len != 0:
continue
# This must be the last header block, otherwise the
# file is malformatted
if mo.group("invalid"):
offset = mo.start()
trailing = block[offset + 3 : offset + card_len - 3].rstrip()
if trailing:
trailing = repr(trailing).lstrip("ub")
# TODO: Pass this warning up to the validation framework
warnings.warn(
"Unexpected bytes trailing END keyword: {}; these "
"bytes will be replaced with spaces on write.".format(trailing),
AstropyUserWarning,
)
else:
# TODO: Pass this warning up to the validation framework
warnings.warn(
"Missing padding to end of the FITS block after the "
"END keyword; additional spaces will be appended to "
"the file upon writing to pad out to {} "
"bytes.".format(BLOCK_SIZE),
AstropyUserWarning,
)
# Sanitize out invalid END card now that the appropriate
# warnings have been issued
block = (
block[:offset]
+ encode_ascii(END_CARD)
+ block[offset + len(END_CARD) :]
)
return True, block
return False, block
def tostring(self, sep="", endcard=True, padding=True):
r"""
Returns a string representation of the header.
By default this uses no separator between cards, adds the END card, and
pads the string with spaces to the next multiple of 2880 bytes. That
is, it returns the header exactly as it would appear in a FITS file.
Parameters
----------
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If True (default) adds the END card to the end of the header
string
padding : bool, optional
If True (default) pads the string with spaces out to the next
multiple of 2880 characters
Returns
-------
str
A string representing a FITS header.
"""
lines = []
for card in self._cards:
s = str(card)
# Cards with CONTINUE cards may be longer than 80 chars; so break
# them into multiple lines
while s:
lines.append(s[: Card.length])
s = s[Card.length :]
s = sep.join(lines)
if endcard:
s += sep + _pad("END")
if padding:
s += " " * _pad_length(len(s))
return s
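    # Illustrative example (editor's sketch): even an empty header serializes
    # to one full FITS block, since the END card is appended and the result is
    # padded to a multiple of 2880 bytes.
    #
    #     >>> len(Header().tostring())
    #     2880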
def tofile(self, fileobj, sep="", endcard=True, padding=True, overwrite=False):
r"""
Writes the header to file or file-like object.
By default this writes the header exactly as it would be written to a
FITS file, with the END card included and padding to the next multiple
of 2880 bytes. However, aspects of this may be controlled.
Parameters
----------
fileobj : path-like or file-like, optional
Either the pathname of a file, or an open file handle or file-like
object.
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If `True` (default) adds the END card to the end of the header
string
padding : bool, optional
If `True` (default) pads the string with spaces out to the next
multiple of 2880 characters
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
"""
close_file = fileobj_closed(fileobj)
if not isinstance(fileobj, _File):
fileobj = _File(fileobj, mode="ostream", overwrite=overwrite)
try:
blocks = self.tostring(sep=sep, endcard=endcard, padding=padding)
actual_block_size = _block_size(sep)
if padding and len(blocks) % actual_block_size != 0:
raise OSError(
"Header size ({}) is not a multiple of block size ({}).".format(
len(blocks) - actual_block_size + BLOCK_SIZE, BLOCK_SIZE
)
)
fileobj.flush()
fileobj.write(blocks.encode("ascii"))
fileobj.flush()
finally:
if close_file:
fileobj.close()
@classmethod
def fromtextfile(cls, fileobj, endcard=False):
"""
Read a header from a simple text file or file-like object.
Equivalent to::
>>> Header.fromfile(fileobj, sep='\\n', endcard=False,
... padding=False)
See Also
--------
fromfile
"""
return cls.fromfile(fileobj, sep="\n", endcard=endcard, padding=False)
def totextfile(self, fileobj, endcard=False, overwrite=False):
"""
Write the header as text to a file or a file-like object.
Equivalent to::
>>> Header.tofile(fileobj, sep='\\n', endcard=False,
... padding=False, overwrite=overwrite)
See Also
--------
tofile
"""
self.tofile(
fileobj, sep="\n", endcard=endcard, padding=False, overwrite=overwrite
)
def clear(self):
"""
Remove all cards from the header.
"""
self._cards = []
self._keyword_indices = collections.defaultdict(list)
self._rvkc_indices = collections.defaultdict(list)
def copy(self, strip=False):
"""
Make a copy of the :class:`Header`.
.. versionchanged:: 1.3
`copy.copy` and `copy.deepcopy` on a `Header` will call this
method.
Parameters
----------
strip : bool, optional
If `True`, strip any headers that are specific to one of the
standard HDU types, so that this header can be used in a different
HDU.
Returns
-------
`Header`
A new :class:`Header` instance.
"""
tmp = self.__class__(copy.copy(card) for card in self._cards)
if strip:
tmp.strip()
return tmp
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
@classmethod
def fromkeys(cls, iterable, value=None):
"""
Similar to :meth:`dict.fromkeys`--creates a new `Header` from an
iterable of keywords and an optional default value.
This method is not likely to be particularly useful for creating real
world FITS headers, but it is useful for testing.
Parameters
----------
iterable
Any iterable that returns strings representing FITS keywords.
value : optional
A default value to assign to each keyword; must be a valid type for
FITS keywords.
Returns
-------
`Header`
A new `Header` instance.
"""
d = cls()
if not isinstance(value, tuple):
value = (value,)
for key in iterable:
d.append((key,) + value)
return d
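    # Illustrative example (editor's sketch):
    #
    #     >>> hdr = Header.fromkeys(['TELESCOP', 'INSTRUME'], 'UNKNOWN')
    #     >>> hdr['TELESCOP'], hdr['INSTRUME']
    #     ('UNKNOWN', 'UNKNOWN')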
def get(self, key, default=None):
"""
Similar to :meth:`dict.get`--returns the value associated with keyword
in the header, or a default value if the keyword is not found.
Parameters
----------
key : str
A keyword that may or may not be in the header.
default : optional
A default value to return if the keyword is not found in the
header.
Returns
-------
value: str, number, complex, bool, or ``astropy.io.fits.card.Undefined``
The value associated with the given keyword, or the default value
if the keyword is not in the header.
"""
try:
return self[key]
except (KeyError, IndexError):
return default
def set(self, keyword, value=None, comment=None, before=None, after=None):
"""
Set the value and/or comment and/or position of a specified keyword.
If the keyword does not already exist in the header, a new keyword is
created in the specified position, or appended to the end of the header
if no position is specified.
This method is similar to :meth:`Header.update` prior to Astropy v0.1.
.. note::
It should be noted that ``header.set(keyword, value)`` and
``header.set(keyword, value, comment)`` are equivalent to
``header[keyword] = value`` and
``header[keyword] = (value, comment)`` respectively.
New keywords can also be inserted relative to existing keywords
using, for example::
>>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
to insert before an existing keyword, or::
>>> header.insert('NAXIS', ('NAXIS1', 4096), after=True)
to insert after an existing keyword.
The only advantage of using :meth:`Header.set` is that it
easily replaces the old usage of :meth:`Header.update` both
conceptually and in terms of function signature.
Parameters
----------
keyword : str
A header keyword
value : str, optional
The value to set for the given keyword; if None the existing value
is kept, but '' may be used to set a blank value
comment : str, optional
The comment to set for the given keyword; if None the existing
comment is kept, but ``''`` may be used to set a blank comment
before : str, int, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument ``before`` takes
precedence over ``after`` if both specified.
after : str, int, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
"""
# Create a temporary card that looks like the one being set; if the
# temporary card turns out to be a RVKC this will make it easier to
# deal with the idiosyncrasies thereof
        # Don't try to make a temporary card though if the keyword looks like
# it might be a HIERARCH card or is otherwise invalid--this step is
# only for validating RVKCs.
if (
len(keyword) <= KEYWORD_LENGTH
and Card._keywd_FSC_RE.match(keyword)
and keyword not in self._keyword_indices
):
new_card = Card(keyword, value, comment)
new_keyword = new_card.keyword
else:
new_keyword = keyword
if new_keyword not in Card._commentary_keywords and new_keyword in self:
if comment is None:
comment = self.comments[keyword]
if value is None:
value = self[keyword]
self[keyword] = (value, comment)
if before is not None or after is not None:
card = self._cards[self._cardindex(keyword)]
self._relativeinsert(card, before=before, after=after, replace=True)
elif before is not None or after is not None:
self._relativeinsert((keyword, value, comment), before=before, after=after)
else:
self[keyword] = (value, comment)
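    # Illustrative examples (editor's sketch): ``hdr`` is assumed to be an
    # existing Header, and 'DATE-OBS' is a hypothetical pre-existing keyword
    # used only to show the ``before`` argument.
    #
    #     >>> hdr.set('EXPTIME', 30.0, 'exposure time in seconds')
    #     >>> hdr['EXPTIME'], hdr.comments['EXPTIME']
    #     (30.0, 'exposure time in seconds')
    #     >>> hdr.set('EXPTIME', 30.0, before='DATE-OBS')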
def items(self):
"""Like :meth:`dict.items`."""
for card in self._cards:
yield card.keyword, None if card.value == UNDEFINED else card.value
def keys(self):
"""
Like :meth:`dict.keys`--iterating directly over the `Header`
instance has the same behavior.
"""
for card in self._cards:
yield card.keyword
def values(self):
"""Like :meth:`dict.values`."""
for card in self._cards:
yield None if card.value == UNDEFINED else card.value
def pop(self, *args):
"""
Works like :meth:`list.pop` if no arguments or an index argument are
supplied; otherwise works like :meth:`dict.pop`.
"""
if len(args) > 2:
raise TypeError(f"Header.pop expected at most 2 arguments, got {len(args)}")
if len(args) == 0:
key = -1
else:
key = args[0]
try:
value = self[key]
except (KeyError, IndexError):
if len(args) == 2:
return args[1]
raise
del self[key]
return value
def popitem(self):
"""Similar to :meth:`dict.popitem`."""
try:
k, v = next(self.items())
except StopIteration:
raise KeyError("Header is empty")
del self[k]
return k, v
def setdefault(self, key, default=None):
"""Similar to :meth:`dict.setdefault`."""
try:
return self[key]
except (KeyError, IndexError):
self[key] = default
return default
def update(self, *args, **kwargs):
"""
Update the Header with new keyword values, updating the values of
existing keywords and appending new keywords otherwise; similar to
`dict.update`.
`update` accepts either a dict-like object or an iterable. In the
former case the keys must be header keywords and the values may be
either scalar values or (value, comment) tuples. In the case of an
iterable the items must be (keyword, value) tuples or (keyword, value,
comment) tuples.
        Arbitrary keyword arguments are also accepted, in which case update() is
        called again with the kwargs dict as its only argument. That is,
::
>>> header.update(NAXIS1=100, NAXIS2=100)
is equivalent to::
header.update({'NAXIS1': 100, 'NAXIS2': 100})
.. warning::
As this method works similarly to `dict.update` it is very
different from the ``Header.update()`` method in Astropy v0.1.
Use of the old API was
**deprecated** for a long time and is now removed. Most uses of the
old API can be replaced as follows:
* Replace ::
header.update(keyword, value)
with ::
header[keyword] = value
* Replace ::
header.update(keyword, value, comment=comment)
with ::
header[keyword] = (value, comment)
* Replace ::
header.update(keyword, value, before=before_keyword)
with ::
header.insert(before_keyword, (keyword, value))
* Replace ::
header.update(keyword, value, after=after_keyword)
with ::
header.insert(after_keyword, (keyword, value),
after=True)
See also :meth:`Header.set` which is a new method that provides an
interface similar to the old ``Header.update()`` and may help make
transition a little easier.
"""
if args:
other = args[0]
else:
other = None
def update_from_dict(k, v):
if not isinstance(v, tuple):
card = Card(k, v)
elif 0 < len(v) <= 2:
card = Card(*((k,) + v))
else:
raise ValueError(
"Header update value for key %r is invalid; the "
"value must be either a scalar, a 1-tuple "
"containing the scalar value, or a 2-tuple "
"containing the value and a comment string." % k
)
self._update(card)
if other is None:
pass
elif isinstance(other, Header):
for card in other.cards:
self._update(card)
elif hasattr(other, "items"):
for k, v in other.items():
update_from_dict(k, v)
elif hasattr(other, "keys"):
for k in other.keys():
update_from_dict(k, other[k])
else:
for idx, card in enumerate(other):
if isinstance(card, Card):
self._update(card)
elif isinstance(card, tuple) and (1 < len(card) <= 3):
self._update(Card(*card))
else:
raise ValueError(
"Header update sequence item #{} is invalid; "
"the item must either be a 2-tuple containing "
"a keyword and value, or a 3-tuple containing "
"a keyword, value, and comment string.".format(idx)
)
if kwargs:
self.update(kwargs)
def append(self, card=None, useblanks=True, bottom=False, end=False):
"""
Appends a new keyword+value card to the end of the Header, similar
to `list.append`.
By default if the last cards in the Header have commentary keywords,
this will append the new keyword before the commentary (unless the new
keyword is also commentary).
Also differs from `list.append` in that it can be called with no
        arguments: in this case a blank card is appended to the end of the
        Header and all the keyword arguments are ignored.
Parameters
----------
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple representing a
single header card; the comment is optional in which case a
2-tuple may be used
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
bottom : bool, optional
If True, instead of appending after the last non-commentary card,
append after the last non-blank card.
end : bool, optional
If True, ignore the useblanks and bottom options, and append at the
very end of the Header.
"""
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
"The value appended to a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if not end and card.is_blank:
# Blank cards should always just be appended to the end
end = True
if end:
self._cards.append(card)
idx = len(self._cards) - 1
else:
idx = len(self._cards) - 1
while idx >= 0 and self._cards[idx].is_blank:
idx -= 1
if not bottom and card.keyword not in Card._commentary_keywords:
while (
idx >= 0 and self._cards[idx].keyword in Card._commentary_keywords
):
idx -= 1
idx += 1
self._cards.insert(idx, card)
self._updateindices(idx)
keyword = Card.normalize_keyword(card.keyword)
self._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
self._rvkc_indices[card.rawkeyword].append(idx)
if not end:
# If the appended card was a commentary card, and it was appended
# before existing cards with the same keyword, the indices for
# cards with that keyword may have changed
if not bottom and card.keyword in Card._commentary_keywords:
self._keyword_indices[keyword].sort()
            # Finally, if useblanks, delete a blank card from the end
            if useblanks and self._countblanks():
                # Don't do this unless there is at least one blank at the end
# of the header; we need to convert the card to its string
# image to see how long it is. In the vast majority of cases
# this will just be 80 (Card.length) but it may be longer for
# CONTINUE cards
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def extend(
self,
cards,
strip=True,
unique=False,
update=False,
update_first=False,
useblanks=True,
bottom=False,
end=False,
):
"""
Appends multiple keyword+value cards to the end of the header, similar
to `list.extend`.
Parameters
----------
cards : iterable
An iterable of (keyword, value, [comment]) tuples; see
`Header.append`.
strip : bool, optional
Remove any keywords that have meaning only to specific types of
HDUs, so that only more general keywords are added from extension
Header or Card list (default: `True`).
unique : bool, optional
If `True`, ensures that no duplicate keywords are appended;
keywords already in this header are simply discarded. The
exception is commentary keywords (COMMENT, HISTORY, etc.): they are
only treated as duplicates if their values match.
update : bool, optional
If `True`, update the current header with the values and comments
from duplicate keywords in the input header. This supersedes the
``unique`` argument. Commentary keywords are treated the same as
if ``unique=True``.
update_first : bool, optional
If the first keyword in the header is 'SIMPLE', and the first
keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is
replaced by the 'XTENSION' keyword. Likewise if the first keyword
in the header is 'XTENSION' and the first keyword in the input
header is 'SIMPLE', the 'XTENSION' keyword is replaced by the
'SIMPLE' keyword. This behavior is otherwise dumb as to whether or
not the resulting header is a valid primary or extension header.
This is mostly provided to support backwards compatibility with the
old ``Header.fromTxtFile`` method, and only applies if
``update=True``.
useblanks, bottom, end : bool, optional
These arguments are passed to :meth:`Header.append` while appending
new cards to the header.
"""
temp = self.__class__(cards)
if strip:
temp.strip()
if len(self):
first = self._cards[0].keyword
else:
first = None
# We don't immediately modify the header, because first we need to sift
# out any duplicates in the new header prior to adding them to the
# existing header, but while *allowing* duplicates from the header
# being extended from (see ticket #156)
extend_cards = []
for idx, card in enumerate(temp.cards):
keyword = card.keyword
if keyword not in Card._commentary_keywords:
if unique and not update and keyword in self:
continue
elif update:
if idx == 0 and update_first:
# Dumbly update the first keyword to either SIMPLE or
                        # XTENSION as the case may be, as was the case in
# Header.fromTxtFile
if (keyword == "SIMPLE" and first == "XTENSION") or (
keyword == "XTENSION" and first == "SIMPLE"
):
del self[0]
self.insert(0, card)
else:
self[keyword] = (card.value, card.comment)
elif keyword in self:
self[keyword] = (card.value, card.comment)
else:
extend_cards.append(card)
else:
extend_cards.append(card)
else:
if (unique or update) and keyword in self:
if card.is_blank:
extend_cards.append(card)
continue
for value in self[keyword]:
if value == card.value:
break
else:
extend_cards.append(card)
else:
extend_cards.append(card)
for card in extend_cards:
self.append(card, useblanks=useblanks, bottom=bottom, end=end)
def count(self, keyword):
"""
Returns the count of the given keyword in the header, similar to
`list.count` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword to count instances of in the header
"""
keyword = Card.normalize_keyword(keyword)
# We have to look before we leap, since otherwise _keyword_indices,
# being a defaultdict, will create an entry for the nonexistent keyword
if keyword not in self._keyword_indices:
raise KeyError(f"Keyword {keyword!r} not found.")
return len(self._keyword_indices[keyword])
def index(self, keyword, start=None, stop=None):
"""
        Returns the index of the first instance of the given keyword in the
header, similar to `list.index` if the Header object is treated as a
list of keywords.
Parameters
----------
keyword : str
The keyword to look up in the list of all keywords in the header
start : int, optional
The lower bound for the index
stop : int, optional
The upper bound for the index
"""
if start is None:
start = 0
if stop is None:
stop = len(self._cards)
if stop < start:
step = -1
else:
step = 1
norm_keyword = Card.normalize_keyword(keyword)
for idx in range(start, stop, step):
if self._cards[idx].keyword.upper() == norm_keyword:
return idx
else:
raise ValueError(f"The keyword {keyword!r} is not in the header.")
def insert(self, key, card, useblanks=True, after=False):
"""
Inserts a new keyword+value card into the Header at a given location,
similar to `list.insert`.
Parameters
----------
key : int, str, or tuple
The index into the list of header keywords before which the
new keyword should be inserted, or the name of a keyword before
which the new keyword should be inserted. Can also accept a
(keyword, index) tuple for inserting around duplicate keywords.
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple; see
`Header.append`
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
after : bool, optional
If set to `True`, insert *after* the specified index or keyword,
rather than before it. Defaults to `False`.
"""
if not isinstance(key, numbers.Integral):
# Don't pass through ints to _cardindex because it will not take
# kindly to indices outside the existing number of cards in the
# header, which insert needs to be able to support (for example
# when inserting into empty headers)
idx = self._cardindex(key)
else:
idx = key
if after:
if idx == -1:
idx = len(self._cards)
else:
idx += 1
if idx >= len(self._cards):
# This is just an append (Though it must be an append absolutely to
# the bottom, ignoring blanks, etc.--the point of the insert method
# is that you get exactly what you asked for with no surprises)
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
"The value inserted into a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
self._cards.insert(idx, card)
keyword = card.keyword
# If idx was < 0, determine the actual index according to the rules
# used by list.insert()
if idx < 0:
idx += len(self._cards) - 1
if idx < 0:
idx = 0
# All the keyword indices above the insertion point must be updated
self._updateindices(idx)
keyword = Card.normalize_keyword(keyword)
self._keyword_indices[keyword].append(idx)
count = len(self._keyword_indices[keyword])
if count > 1:
# There were already keywords with this same name
if keyword not in Card._commentary_keywords:
warnings.warn(
"A {!r} keyword already exists in this header. Inserting "
"duplicate keyword.".format(keyword),
AstropyUserWarning,
)
self._keyword_indices[keyword].sort()
if card.field_specifier is not None:
# Update the index of RVKC as well
rvkc_indices = self._rvkc_indices[card.rawkeyword]
rvkc_indices.append(idx)
rvkc_indices.sort()
if useblanks:
self._useblanks(len(str(card)) // Card.length)
self._modified = True
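    # Illustrative examples (editor's sketch; assumes ``hdr`` already contains
    # a 'NAXIS' card):
    #
    #     >>> hdr.insert('NAXIS', ('OBSERVER', 'Hubble'))           # before NAXIS
    #     >>> hdr.insert('NAXIS', ('DETECTOR', 'CCD'), after=True)  # after NAXIS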
def remove(self, keyword, ignore_missing=False, remove_all=False):
"""
Removes the first instance of the given keyword from the header similar
to `list.remove` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword of which to remove the first instance in the header.
ignore_missing : bool, optional
When True, ignores missing keywords. Otherwise, if the keyword
is not present in the header a KeyError is raised.
remove_all : bool, optional
When True, all instances of keyword will be removed.
Otherwise only the first instance of the given keyword is removed.
"""
keyword = Card.normalize_keyword(keyword)
if keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
if remove_all:
while keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
elif not ignore_missing:
raise KeyError(f"Keyword '{keyword}' not found.")
def rename_keyword(self, oldkeyword, newkeyword, force=False):
"""
Rename a card's keyword in the header.
Parameters
----------
oldkeyword : str or int
Old keyword or card index
newkeyword : str
New keyword
force : bool, optional
When `True`, if the new keyword already exists in the header, force
the creation of a duplicate keyword. Otherwise a
`ValueError` is raised.
"""
oldkeyword = Card.normalize_keyword(oldkeyword)
newkeyword = Card.normalize_keyword(newkeyword)
if newkeyword == "CONTINUE":
raise ValueError("Can not rename to CONTINUE")
if (
newkeyword in Card._commentary_keywords
or oldkeyword in Card._commentary_keywords
):
if not (
newkeyword in Card._commentary_keywords
and oldkeyword in Card._commentary_keywords
):
raise ValueError(
"Regular and commentary keys can not be renamed to each other."
)
elif not force and newkeyword in self:
raise ValueError(f"Intended keyword {newkeyword} already exists in header.")
idx = self.index(oldkeyword)
card = self._cards[idx]
del self[idx]
self.insert(idx, (newkeyword, card.value, card.comment))
def add_history(self, value, before=None, after=None):
"""
Add a ``HISTORY`` card.
Parameters
----------
value : str
History text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("HISTORY", value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""
Add a ``COMMENT`` card.
Parameters
----------
value : str
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("COMMENT", value, before=before, after=after)
def add_blank(self, value="", before=None, after=None):
"""
Add a blank card.
Parameters
----------
value : str, optional
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("", value, before=before, after=after)
def strip(self):
"""
Strip cards specific to a certain kind of header.
Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of
the header can be used to reconstruct another kind of header.
"""
# TODO: Previously this only deleted some cards specific to an HDU if
# _hdutype matched that type. But it seemed simple enough to just
# delete all desired cards anyways, and just ignore the KeyErrors if
# they don't exist.
# However, it might be desirable to make this extendable somehow--have
# a way for HDU classes to specify some headers that are specific only
# to that type, and should be removed otherwise.
naxis = self.get("NAXIS", 0)
tfields = self.get("TFIELDS", 0)
for idx in range(naxis):
self.remove("NAXIS" + str(idx + 1), ignore_missing=True)
for name in (
"TFORM",
"TSCAL",
"TZERO",
"TNULL",
"TTYPE",
"TUNIT",
"TDISP",
"TDIM",
"THEAP",
"TBCOL",
):
for idx in range(tfields):
self.remove(name + str(idx + 1), ignore_missing=True)
for name in (
"SIMPLE",
"XTENSION",
"BITPIX",
"NAXIS",
"EXTEND",
"PCOUNT",
"GCOUNT",
"GROUPS",
"BSCALE",
"BZERO",
"TFIELDS",
):
self.remove(name, ignore_missing=True)
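    # Illustrative example (editor's sketch): structural keywords are removed,
    # everything else is kept.
    #
    #     >>> hdr = Header([('SIMPLE', True), ('BITPIX', 8), ('NAXIS', 0),
    #     ...               ('OBSERVER', 'Hubble')])
    #     >>> hdr.strip()
    #     >>> list(hdr)
    #     ['OBSERVER']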
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
def _update(self, card):
"""
The real update code. If keyword already exists, its value and/or
comment will be updated. Otherwise a new card will be appended.
This will not create a duplicate keyword except in the case of
commentary cards. The only other way to force creation of a duplicate
is to use the insert(), append(), or extend() methods.
"""
keyword, value, comment = card
# Lookups for existing/known keywords are case-insensitive
keyword = keyword.strip().upper()
if keyword.startswith("HIERARCH "):
keyword = keyword[9:]
if (
keyword not in Card._commentary_keywords
and keyword in self._keyword_indices
):
# Easy; just update the value/comment
idx = self._keyword_indices[keyword][0]
existing_card = self._cards[idx]
existing_card.value = value
if comment is not None:
# '' should be used to explicitly blank a comment
existing_card.comment = comment
if existing_card._modified:
self._modified = True
elif keyword in Card._commentary_keywords:
cards = self._splitcommentary(keyword, value)
if keyword in self._keyword_indices:
# Append after the last keyword of the same type
idx = self.index(keyword, start=len(self) - 1, stop=-1)
isblank = not (keyword or value or comment)
for c in reversed(cards):
self.insert(idx + 1, c, useblanks=(not isblank))
else:
for c in cards:
self.append(c, bottom=True)
else:
# A new keyword! self.append() will handle updating _modified
self.append(card)
def _cardindex(self, key):
"""Returns an index into the ._cards list given a valid lookup key."""
# This used to just set key = (key, 0) and then go on to act as if the
# user passed in a tuple, but it's much more common to just be given a
# string as the key, so optimize more for that case
if isinstance(key, str):
keyword = key
n = 0
elif isinstance(key, numbers.Integral):
# If < 0, determine the actual index
if key < 0:
key += len(self._cards)
if key < 0 or key >= len(self._cards):
raise IndexError("Header index out of range.")
return key
elif isinstance(key, slice):
return key
elif isinstance(key, tuple):
if (
len(key) != 2
or not isinstance(key[0], str)
or not isinstance(key[1], numbers.Integral)
):
raise ValueError(
"Tuple indices must be 2-tuples consisting of a "
"keyword string and an integer index."
)
keyword, n = key
else:
raise ValueError(
"Header indices must be either a string, a 2-tuple, or an integer."
)
keyword = Card.normalize_keyword(keyword)
# Returns the index into _cards for the n-th card with the given
# keyword (where n is 0-based)
indices = self._keyword_indices.get(keyword, None)
if keyword and not indices:
if len(keyword) > KEYWORD_LENGTH or "." in keyword:
raise KeyError(f"Keyword {keyword!r} not found.")
else:
# Maybe it's a RVKC?
indices = self._rvkc_indices.get(keyword, None)
if not indices:
raise KeyError(f"Keyword {keyword!r} not found.")
try:
return indices[n]
except IndexError:
raise IndexError(
f"There are only {len(indices)} {keyword!r} cards in the header."
)
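# The (keyword, n) tuple form accepted above is what makes duplicate
# keywords addressable, e.g. for commentary cards (assuming hdr is a Header
# containing at least two COMMENT cards):
#
#     hdr["COMMENT", 0]   # value of the first COMMENT card
#     hdr["COMMENT", 1]   # value of the second COMMENT card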
def _keyword_from_index(self, idx):
"""
Given an integer index, return the (keyword, repeat) tuple that index
refers to. For most keywords the repeat will always be zero, but it
may be greater than zero for keywords that are duplicated (especially
commentary keywords).
In a sense this is the inverse of self.index, except that it also
supports duplicates.
"""
if idx < 0:
idx += len(self._cards)
keyword = self._cards[idx].keyword
keyword = Card.normalize_keyword(keyword)
repeat = self._keyword_indices[keyword].index(idx)
return keyword, repeat
def _relativeinsert(self, card, before=None, after=None, replace=False):
"""
Inserts a new card before or after an existing card; used to
implement support for the legacy before/after keyword arguments to
Header.update().
If replace=True, move an existing card with the same keyword.
"""
if before is None:
insertionkey = after
else:
insertionkey = before
def get_insertion_idx():
if not (
isinstance(insertionkey, numbers.Integral)
and insertionkey >= len(self._cards)
):
idx = self._cardindex(insertionkey)
else:
idx = insertionkey
if before is None:
idx += 1
return idx
if replace:
# The card presumably already exists somewhere in the header.
# Check whether or not we actually have to move it; if it does need
# to be moved we just delete it and then it will be reinserted
# below
old_idx = self._cardindex(card.keyword)
insertion_idx = get_insertion_idx()
if insertion_idx >= len(self._cards) and old_idx == len(self._cards) - 1:
# The card would be appended to the end, but it's already at
# the end
return
if before is not None:
if old_idx == insertion_idx - 1:
return
elif after is not None and old_idx == insertion_idx:
return
del self[old_idx]
# Even if replace=True, the insertion idx may have changed since the
# old card was deleted
idx = get_insertion_idx()
if card[0] in Card._commentary_keywords:
cards = reversed(self._splitcommentary(card[0], card[1]))
else:
cards = [card]
for c in cards:
self.insert(idx, c)
def _updateindices(self, idx, increment=True):
"""
For all cards with index above idx, increment or decrement its index
value in the keyword_indices dict.
"""
if idx > len(self._cards):
# Save us some effort
return
increment = 1 if increment else -1
for index_sets in (self._keyword_indices, self._rvkc_indices):
for indices in index_sets.values():
for jdx, keyword_index in enumerate(indices):
if keyword_index >= idx:
indices[jdx] += increment
def _countblanks(self):
"""Returns the number of blank cards at the end of the Header."""
for idx in range(1, len(self._cards)):
if not self._cards[-idx].is_blank:
return idx - 1
return 0
def _useblanks(self, count):
for _ in range(count):
if self._cards[-1].is_blank:
del self[-1]
else:
break
def _haswildcard(self, keyword):
"""Return `True` if the input keyword contains a wildcard pattern."""
return isinstance(keyword, str) and (
keyword.endswith("...") or "*" in keyword or "?" in keyword
)
def _wildcardmatch(self, pattern):
"""
Returns a list of indices of the cards matching the given wildcard
pattern.
* '*' matches 0 or more characters
* '?' matches a single character
* '...' matches 0 or more of any non-whitespace character
"""
pattern = pattern.replace("*", r".*").replace("?", r".")
pattern = pattern.replace("...", r"\S*") + "$"
pattern_re = re.compile(pattern, re.I)
return [
idx
for idx, card in enumerate(self._cards)
if pattern_re.match(card.keyword)
]
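# Illustrative examples of the wildcard forms documented above, assuming
# hdr is a Header holding the usual image keywords; each lookup returns a
# filtered Header:
#
#     hdr["NAXIS*"]    # matches NAXIS, NAXIS1, NAXIS2, ...
#     hdr["NAXIS?"]    # matches NAXIS1 and NAXIS2, but not NAXIS
#     hdr["CRVAL..."]  # matches CRVAL followed by zero or more
#                      # non-whitespace characters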
def _set_slice(self, key, value, target):
"""
Used to implement Header.__setitem__ and CardAccessor.__setitem__.
"""
if isinstance(key, slice) or self._haswildcard(key):
if isinstance(key, slice):
indices = range(*key.indices(len(target)))
else:
indices = self._wildcardmatch(key)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
target[idx] = val
return True
return False
def _splitcommentary(self, keyword, value):
"""
Given a commentary keyword and value, returns a list of the one or more
cards needed to represent the full value. This is primarily used to
create the multiple commentary cards needed to represent a long value
that won't fit into a single commentary card.
"""
# The maximum value in each card is the maximum card length minus
# the maximum keyword length (which can include spaces if the keyword
# is less than 8 characters long)
maxlen = Card.length - KEYWORD_LENGTH
valuestr = str(value)
if len(valuestr) <= maxlen:
# The value can fit in a single card
cards = [Card(keyword, value)]
else:
# The value must be split across multiple consecutive commentary
# cards
idx = 0
cards = []
while idx < len(valuestr):
cards.append(Card(keyword, valuestr[idx : idx + maxlen]))
idx += maxlen
return cards
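# Worked example: with the standard 80-character card length and
# 8-character keyword field, maxlen is 80 - 8 = 72, so a 200-character
# HISTORY value is split into cards of 72, 72, and 56 characters
# (assuming fits is astropy.io.fits):
#
#     hdr = fits.Header()
#     hdr["HISTORY"] = "x" * 200
#     len(hdr["HISTORY"])   # -> 3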
def _add_commentary(self, key, value, before=None, after=None):
"""
Add a commentary card.
If ``before`` and ``after`` are `None`, add to the last occurrence
of cards of the same name (except blank card). If there is no
card (or blank card), append at the end.
"""
if before is not None or after is not None:
self._relativeinsert((key, value), before=before, after=after)
else:
self[key] = value
collections.abc.MutableSequence.register(Header)
collections.abc.MutableMapping.register(Header)
class _DelayedHeader:
"""
Descriptor used to create the Header object from the header string that
was stored in HDU._header_str when parsing the file.
"""
def __get__(self, obj, owner=None):
try:
return obj.__dict__["_header"]
except KeyError:
if obj._header_str is not None:
hdr = Header.fromstring(obj._header_str)
obj._header_str = None
else:
raise AttributeError(
f"'{type(obj).__name__}' object has no attribute '_header'"
)
obj.__dict__["_header"] = hdr
return hdr
def __set__(self, obj, val):
obj.__dict__["_header"] = val
def __delete__(self, obj):
del obj.__dict__["_header"]
class _BasicHeaderCards:
"""
This class allows access to cards through the _BasicHeader.cards attribute.
This is needed because, during HDU class detection, some HDUs use
the .cards interface. Cards cannot be modified here, as the _BasicHeader
object will be deleted once the HDU object is created.
"""
def __init__(self, header):
self.header = header
def __getitem__(self, key):
# .cards is a list of cards, so key here is an integer.
# get the keyword name from its index.
key = self.header._keys[key]
# then we get the card from the _BasicHeader._cards list, or parse it
# if needed.
try:
return self.header._cards[key]
except KeyError:
cardstr = self.header._raw_cards[key]
card = Card.fromstring(cardstr)
self.header._cards[key] = card
return card
class _BasicHeader(collections.abc.Mapping):
"""This class provides a fast header parsing, without all the additional
features of the Header class. Here only standard keywords are parsed, no
support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc.
The raw card images are stored and parsed only if needed. The idea is that
to create the HDU objects, only a small subset of standard cards is needed.
Once a card is parsed, which is deferred to the Card class, the Card object
is kept in a cache. This is useful because a small subset of cards is used
a lot in the HDU creation process (NAXIS, XTENSION, ...).
"""
def __init__(self, cards):
# dict of (keywords, card images)
self._raw_cards = cards
self._keys = list(cards.keys())
# dict of (keyword, Card object) storing the parsed cards
self._cards = {}
# the _BasicHeaderCards object allows access to Card objects by their
# keyword index
self.cards = _BasicHeaderCards(self)
self._modified = False
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
key = self._keys[key]
try:
return self._cards[key].value
except KeyError:
# parse the Card and store it
cardstr = self._raw_cards[key]
self._cards[key] = card = Card.fromstring(cardstr)
return card.value
def __len__(self):
return len(self._raw_cards)
def __iter__(self):
return iter(self._raw_cards)
def index(self, keyword):
return self._keys.index(keyword)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
@classmethod
def fromfile(cls, fileobj):
"""The main method to parse a FITS header from a file. The parsing is
done with the parse_header function implemented in Cython.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "rb")
close_file = True
try:
header_str, cards = parse_header(fileobj)
_check_padding(header_str, BLOCK_SIZE, False)
return header_str, cls(cards)
finally:
if close_file:
fileobj.close()
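# A usage sketch of this internal fast path, assuming a hypothetical file
# "example.fits" on disk:
#
#     header_str, basic = _BasicHeader.fromfile("example.fits")
#     basic["NAXIS"], basic["BITPIX"]   # cards are parsed lazily, one at a time
#
# The full-featured Header can later be built from header_str via
# Header.fromstring() (see _DelayedHeader above).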
class _CardAccessor:
"""
This is a generic class for wrapping a Header in such a way that you can
use the header's slice/filtering capabilities to return a subset of cards
and do something with them.
This is sort of the opposite notion of the old CardList class--whereas
Header used to use CardList to get lists of cards, this uses Header to get
lists of cards.
"""
# TODO: Consider giving this dict/list methods like Header itself
def __init__(self, header):
self._header = header
def __repr__(self):
return "\n".join(repr(c) for c in self._header._cards)
def __len__(self):
return len(self._header._cards)
def __iter__(self):
return iter(self._header._cards)
def __eq__(self, other):
# If the `other` item is a scalar we will still treat it as equal if
# this _CardAccessor only contains one item
if not isiterable(other) or isinstance(other, str):
if len(self) == 1:
other = [other]
else:
return False
return all(a == b for a, b in itertools.zip_longest(self, other))
def __ne__(self, other):
return not (self == other)
def __getitem__(self, item):
if isinstance(item, slice) or self._header._haswildcard(item):
return self.__class__(self._header[item])
idx = self._header._cardindex(item)
return self._header._cards[idx]
def _setslice(self, item, value):
"""
Helper for implementing __setitem__ on _CardAccessor subclasses; slices
should always be handled in this same way.
"""
if isinstance(item, slice) or self._header._haswildcard(item):
if isinstance(item, slice):
indices = range(*item.indices(len(self)))
else:
indices = self._header._wildcardmatch(item)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
self[idx] = val
return True
return False
class _HeaderComments(_CardAccessor):
"""
A class used internally by the Header class for the Header.comments
attribute access.
This object can be used to display all the keyword comments in the Header,
or look up the comments on specific keywords. It allows all the same forms
of keyword lookup as the Header class itself, but returns comments instead
of values.
"""
def __iter__(self):
for card in self._header._cards:
yield card.comment
def __repr__(self):
"""Returns a simple list of all keywords and their comments."""
keyword_length = KEYWORD_LENGTH
for card in self._header._cards:
keyword_length = max(keyword_length, len(card.keyword))
return "\n".join(
"{:>{len}} {}".format(c.keyword, c.comment, len=keyword_length)
for c in self._header._cards
)
def __getitem__(self, item):
"""
Slices and filter strings return a new _HeaderComments containing the
returned cards. Otherwise the comment of a single card is returned.
"""
item = super().__getitem__(item)
if isinstance(item, _HeaderComments):
# The item key was a slice
return item
return item.comment
def __setitem__(self, item, comment):
"""
Set/update the comment on specified card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, comment, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
idx = self._header._cardindex(item)
value = self._header[idx]
self._header[idx] = (value, comment)
class _HeaderCommentaryCards(_CardAccessor):
"""
This is used to return a list-like sequence over all the values in the
header for a given commentary keyword, such as HISTORY.
"""
def __init__(self, header, keyword=""):
super().__init__(header)
self._keyword = keyword
self._count = self._header.count(self._keyword)
self._indices = slice(self._count).indices(self._count)
# __len__ and __iter__ need to be overridden from the base class due to the
# different approach this class has to take for slicing
def __len__(self):
return len(range(*self._indices))
def __iter__(self):
for idx in range(*self._indices):
yield self._header[(self._keyword, idx)]
def __repr__(self):
return "\n".join(str(x) for x in self)
def __getitem__(self, idx):
if isinstance(idx, slice):
n = self.__class__(self._header, self._keyword)
n._indices = idx.indices(self._count)
return n
elif not isinstance(idx, numbers.Integral):
raise ValueError(f"{self._keyword} index must be an integer")
idx = list(range(*self._indices))[idx]
return self._header[(self._keyword, idx)]
def __setitem__(self, item, value):
"""
Set the value of a specified commentary card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, value, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
self._header[(self._keyword, item)] = value
def _block_size(sep):
"""
Determine the size of a FITS header block if a non-blank separator is used
between cards.
"""
return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1))
def _pad_length(stringlen):
"""Bytes needed to pad the input stringlen to the next FITS block."""
return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE
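# Worked examples with BLOCK_SIZE = 2880:
#     _pad_length(2880) == 0      # already on a block boundary
#     _pad_length(2881) == 2879   # one byte into the next block
#     _pad_length(80)   == 2800   # a single card still needs a full block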
def _check_padding(header_str, block_size, is_eof, check_block_size=True):
# Strip any zero-padding (see ticket #106)
if header_str and header_str[-1] == "\0":
if is_eof and header_str.strip("\0") == "":
# TODO: Pass this warning to validation framework
warnings.warn(
"Unexpected extra padding at the end of the file. This "
"padding may not be preserved when saving changes.",
AstropyUserWarning,
)
raise EOFError()
else:
# Replace the illegal null bytes with spaces as required by
# the FITS standard, and issue a nasty warning
# TODO: Pass this warning to validation framework
warnings.warn(
"Header block contains null bytes instead of spaces for "
"padding, and is not FITS-compliant. Nulls may be "
"replaced with spaces upon writing.",
AstropyUserWarning,
)
header_str.replace("\0", " ")
if check_block_size and (len(header_str) % block_size) != 0:
# This error message ignores the length of the separator for
# now, but maybe it shouldn't?
actual_len = len(header_str) - block_size + BLOCK_SIZE
# TODO: Pass this error to validation framework
raise ValueError(f"Header size is not multiple of {BLOCK_SIZE}: {actual_len}")
def _hdr_data_size(header):
"""Calculate the data size (in bytes) following the given `Header`."""
size = 0
naxis = header.get("NAXIS", 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * header["NAXIS" + str(idx + 1)]
bitpix = header["BITPIX"]
gcount = header.get("GCOUNT", 1)
pcount = header.get("PCOUNT", 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
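# Worked example: for a simple image header with NAXIS = 2, NAXIS1 = 100,
# NAXIS2 = 200, BITPIX = -32 and the default GCOUNT = 1, PCOUNT = 0,
#     size = abs(-32) * 1 * (0 + 100 * 200) // 8 == 80000 bytes,
# which data_size_padded rounds up to the next 2880-byte block boundary,
# 28 * 2880 == 80640 bytes.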
|
f9760c7819677880031ea3ac57fdb7ddc3b35b3f929dc7955d8582a74b1bae1c | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import warnings
import weakref
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import lazyproperty
from .column import (
_VLF,
ASCII2NUMPY,
ASCII2STR,
ASCIITNULL,
FITS2NUMPY,
ColDefs,
Delayed,
_AsciiColDefs,
_FormatP,
_FormatX,
_get_index,
_makep,
_unwrapx,
_wrapx,
)
from .util import _rstrip_inplace, decode_ascii, encode_ascii
class FITS_record:
"""
FITS record class.
`FITS_record` is used to access records of the `FITS_rec` object.
This will allow us to deal with scaled columns. It also handles
conversion/scaling of columns in ASCII tables. The `FITS_record`
class expects a `FITS_rec` object as input.
"""
def __init__(
self, input, row=0, start=None, end=None, step=None, base=None, **kwargs
):
"""
Parameters
----------
input : array
The array to wrap.
row : int, optional
The starting logical row of the array.
start : int, optional
The starting column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
end : int, optional
The ending column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
"""
self.array = input
self.row = row
if base:
width = len(base)
else:
width = self.array._nfields
s = slice(start, end, step).indices(width)
self.start, self.end, self.step = s
self.base = base
def __getitem__(self, key):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
return type(self)(self.array, self.row, key.start, key.stop, key.step, self)
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
return self.array.field(indx)[self.row]
def __setitem__(self, key, value):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
for indx in range(*key.indices(len(self))):
indx = self._get_index(indx)
self.array.field(indx)[self.row] = value
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
self.array.field(indx)[self.row] = value
def __len__(self):
return len(range(self.start, self.end, self.step))
def __repr__(self):
"""
Display a single row.
"""
outlist = []
for idx in range(len(self)):
outlist.append(repr(self[idx]))
return f"({', '.join(outlist)})"
def field(self, field):
"""
Get the field data of the record.
"""
return self.__getitem__(field)
def setfield(self, field, value):
"""
Set the field data of the record.
"""
self.__setitem__(field, value)
@lazyproperty
def _bases(self):
bases = [weakref.proxy(self)]
base = self.base
while base:
bases.append(base)
base = base.base
return bases
def _get_index(self, indx):
indices = np.ogrid[: self.array._nfields]
for base in reversed(self._bases):
if base.step < 1:
s = slice(base.start, None, base.step)
else:
s = slice(base.start, base.end, base.step)
indices = indices[s]
return indices[indx]
class FITS_rec(np.recarray):
"""
FITS record array class.
`FITS_rec` is the data portion of a table HDU. It is a layer over
`~numpy.recarray`, so we can deal with scaled columns.
It inherits all of the standard methods from `numpy.ndarray`.
"""
_record_type = FITS_record
_character_as_bytes = False
def __new__(subtype, input):
"""
Construct a FITS record array from a recarray.
"""
# input should be a record array
if input.dtype.subdtype is None:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data
)
else:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data, strides=input.strides
)
self._init()
if self.dtype.fields:
self._nfields = len(self.dtype.fields)
return self
def __setstate__(self, state):
meta = state[-1]
column_state = state[-2]
state = state[:-2]
super().__setstate__(state)
self._col_weakrefs = weakref.WeakSet()
for attr, value in zip(meta, column_state):
setattr(self, attr, value)
def __reduce__(self):
"""
Return a 3-tuple for pickling a FITS_rec. Use the super-class
functionality but then add in a tuple of FITS_rec-specific
values that get used in __setstate__.
"""
reconst_func, reconst_func_args, state = super().__reduce__()
# Define FITS_rec-specific attrs that get added to state
column_state = []
meta = []
for attrs in [
"_converted",
"_heapoffset",
"_heapsize",
"_nfields",
"_gap",
"_uint",
"parnames",
"_coldefs",
]:
with suppress(AttributeError):
# _coldefs can be Delayed, and file objects cannot be
# pickled, so it needs to be deepcopied first
if attrs == "_coldefs":
column_state.append(self._coldefs.__deepcopy__(None))
else:
column_state.append(getattr(self, attrs))
meta.append(attrs)
state = state + (column_state, meta)
return reconst_func, reconst_func_args, state
def __array_finalize__(self, obj):
if obj is None:
return
if isinstance(obj, FITS_rec):
self._character_as_bytes = obj._character_as_bytes
if isinstance(obj, FITS_rec) and obj.dtype == self.dtype:
self._converted = obj._converted
self._heapoffset = obj._heapoffset
self._heapsize = obj._heapsize
self._col_weakrefs = obj._col_weakrefs
self._coldefs = obj._coldefs
self._nfields = obj._nfields
self._gap = obj._gap
self._uint = obj._uint
elif self.dtype.fields is not None:
# This will allow regular ndarrays with fields, rather than
# just other FITS_rec objects
self._nfields = len(self.dtype.fields)
self._converted = {}
self._heapoffset = getattr(obj, "_heapoffset", 0)
self._heapsize = getattr(obj, "_heapsize", 0)
self._gap = getattr(obj, "_gap", 0)
self._uint = getattr(obj, "_uint", False)
self._col_weakrefs = weakref.WeakSet()
self._coldefs = ColDefs(self)
# Work around chicken-egg problem. Column.array relies on the
# _coldefs attribute to set up ref back to parent FITS_rec; however
# in the above line the self._coldefs has not been assigned yet so
# this fails. This patches that up...
for col in self._coldefs:
del col.array
col._parent_fits_rec = weakref.ref(self)
else:
self._init()
def _init(self):
"""Initializes internal attributes specific to FITS-isms."""
self._nfields = 0
self._converted = {}
self._heapoffset = 0
self._heapsize = 0
self._col_weakrefs = weakref.WeakSet()
self._coldefs = None
self._gap = 0
self._uint = False
@classmethod
def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False):
"""
Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec`
object.
.. note::
This was originally part of the ``new_table`` function in the table
module but was moved into a class method since most of its
functionality always had more to do with initializing a `FITS_rec`
object than anything else, and much of it also overlapped with
``FITS_rec._scale_back``.
Parameters
----------
columns : sequence of `Column` or a `ColDefs`
The columns from which to create the table data. If these
columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns
will be used as a template for a new table with the requested
number of rows.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If
`False`, copy the data from input, undefined cells will still
be filled with zeros/blanks.
"""
if not isinstance(columns, ColDefs):
columns = ColDefs(columns)
# read the delayed data
for column in columns:
arr = column.array
if isinstance(arr, Delayed):
if arr.hdu.data is None:
column.array = None
else:
column.array = _get_recarray_field(arr.hdu.data, arr.field)
# Reset columns._arrays (which we may want to just do away with
# altogether)
del columns._arrays
# use the largest column shape as the shape of the record
if nrows == 0:
for arr in columns._arrays:
if arr is not None:
dim = arr.shape[0]
else:
dim = 0
if dim > nrows:
nrows = dim
raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8)
raw_data.fill(ord(columns._padding_byte))
data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)
data._character_as_bytes = character_as_bytes
# Previously this assignment was made from hdu.columns, but that's a
# bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
# the _TableBaseHDU.columns property is actually returned from
# .data._coldefs, so this assignment was circular! Don't make that
# mistake again.
# All of this is an artifact of the fragility of the FITS_rec class,
# and that it can't just be initialized by columns...
data._coldefs = columns
# If fill is True we don't copy anything from the column arrays. We're
# just using them as a template, and returning a table filled with
# zeros/blanks
if fill:
return data
# Otherwise we have to fill the recarray with data from the input
# columns
for idx, column in enumerate(columns):
# For each column in the ColDef object, determine the number of
# rows in that column. This will be either the number of rows in
# the ndarray associated with the column, or the number of rows
# given in the call to this function, whichever is smaller. If
# the input FILL argument is true, the number of rows is set to
# zero so that no data is copied from the original input data.
arr = column.array
if arr is None:
array_size = 0
else:
array_size = len(arr)
n = min(array_size, nrows)
# TODO: At least *some* of this logic is mostly redundant with the
# _convert_foo methods in this class; see if we can eliminate some
# of that duplication.
if not n:
# The input column had an empty array, so just use the fill
# value
continue
field = _get_recarray_field(data, idx)
name = column.name
fitsformat = column.format
recformat = fitsformat.recformat
outarr = field[:n]
inarr = arr[:n]
if isinstance(recformat, _FormatX):
# Data is a bit array
if inarr.shape[-1] == recformat.repeat:
_wrapx(inarr, outarr, recformat.repeat)
continue
elif isinstance(recformat, _FormatP):
data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows))
continue
# TODO: Find a better way of determining that the column is meant
# to be FITS L formatted
elif recformat[-2:] == FITS2NUMPY["L"] and inarr.dtype == bool:
# column is boolean
# The raw data field should be filled with either 'T' or 'F'
# (not 0). Use 'F' as a default
field[:] = ord("F")
# Also save the original boolean array in data._converted so
# that it doesn't have to be re-converted
converted = np.zeros(field.shape, dtype=bool)
converted[:n] = inarr
data._cache_field(name, converted)
# TODO: Maybe this step isn't necessary at all if _scale_back
# will handle it?
inarr = np.where(inarr == np.False_, ord("F"), ord("T"))
elif columns[idx]._physical_values and columns[idx]._pseudo_unsigned_ints:
# Temporary hack...
bzero = column.bzero
converted = np.zeros(field.shape, dtype=inarr.dtype)
converted[:n] = inarr
data._cache_field(name, converted)
if n < nrows:
# Pre-scale rows below the input data
field[n:] = -bzero
inarr = inarr - bzero
elif isinstance(columns, _AsciiColDefs):
# Regardless of whether the format is character or numeric, if the
# input array contains characters then it's already in the raw
# format for ASCII tables
if fitsformat._pseudo_logical:
# Hack to support converting from 8-bit T/F characters
# Normally the column array is a chararray of 1 character
# strings, but we need to view it as a normal ndarray of
# 8-bit ints to fill it with ASCII codes for 'T' and 'F'
outarr = field.view(np.uint8, np.ndarray)[:n]
elif arr.dtype.kind not in ("S", "U"):
# Set up views of numeric columns with the appropriate
# numeric dtype
# Fill with the appropriate blanks for the column format
data._cache_field(name, np.zeros(nrows, dtype=arr.dtype))
outarr = data._converted[name][:n]
outarr[:] = inarr
continue
if inarr.shape != outarr.shape:
if (
inarr.dtype.kind == outarr.dtype.kind
and inarr.dtype.kind in ("U", "S")
and inarr.dtype != outarr.dtype
):
inarr_rowsize = inarr[0].size
inarr = inarr.flatten().view(outarr.dtype)
# This is a special case to handle input arrays with
# non-trivial TDIMn.
# By design each row of the outarray is 1-D, while each row of
# the input array may be n-D
if outarr.ndim > 1:
# The normal case where the first dimension is the rows
inarr_rowsize = inarr[0].size
inarr = inarr.reshape(n, inarr_rowsize)
outarr[:, :inarr_rowsize] = inarr
else:
# Special case for strings where the out array only has one
# dimension (the second dimension is rolled up into the
# strings)
outarr[:n] = inarr.ravel()
else:
outarr[:] = inarr
# Now replace the original column array references with the new
# fields
# This is required to prevent the issue reported in
# https://github.com/spacetelescope/PyFITS/issues/99
for idx in range(len(columns)):
columns._arrays[idx] = data.field(idx)
return data
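# A minimal usage sketch of from_columns, assuming the public fits.Column
# interface:
#
#     import numpy as np
#     from astropy.io import fits
#     c1 = fits.Column(name="time", format="D", array=np.arange(3.0))
#     c2 = fits.Column(name="flag", format="L", array=np.array([True, False, True]))
#     rec = fits.FITS_rec.from_columns([c1, c2])
#     rec["time"], rec["flag"]
#
# In practice most users reach this through fits.BinTableHDU.from_columns,
# which wraps this method.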
def __repr__(self):
# Force use of the normal ndarray repr (rather than the new
# one added for recarray in Numpy 1.10) for backwards compat
return np.ndarray.__repr__(self)
def __getattribute__(self, attr):
# First, see if ndarray has this attr, and return it if so. Note that
# this means a field with the same name as an ndarray attr cannot be
# accessed by attribute, this is Numpy's default behavior.
# We avoid using np.recarray.__getattribute__ here because after doing
# this check it would access the columns without doing the conversions
# that we need (with .field, see below).
try:
return object.__getattribute__(self, attr)
except AttributeError:
pass
# attr might still be a fieldname. If we have column definitions,
# we should access this via .field, as the data may have to be scaled.
if self._coldefs is not None and attr in self.columns.names:
return self.field(attr)
# If not, just let the usual np.recarray override deal with it.
return super().__getattribute__(attr)
def __getitem__(self, key):
if self._coldefs is None:
return super().__getitem__(key)
if isinstance(key, str):
return self.field(key)
# Have to view as a recarray then back as a FITS_rec, otherwise the
# circular reference fix/hack in FITS_rec.field() won't preserve
# the slice.
out = self.view(np.recarray)[key]
if type(out) is not np.recarray:
# Oops, we got a single element rather than a view. In that case,
# return a Record, which has no __getstate__ and is more efficient.
return self._record_type(self, key)
# We got a view; change it back to our class, and add stuff
out = out.view(type(self))
out._uint = self._uint
out._coldefs = ColDefs(self._coldefs)
arrays = []
out._converted = {}
for idx, name in enumerate(self._coldefs.names):
#
# Store the new arrays for the _coldefs object
#
arrays.append(self._coldefs._arrays[idx][key])
# Ensure that the sliced FITS_rec will view the same scaled
# columns as the original; this is one of the few cases where
# it is not necessary to use _cache_field()
if name in self._converted:
dummy = self._converted[name]
field = np.ndarray.__getitem__(dummy, key)
out._converted[name] = field
out._coldefs._arrays = arrays
return out
def __setitem__(self, key, value):
if self._coldefs is None:
return super().__setitem__(key, value)
if isinstance(key, str):
self[key][:] = value
return
if isinstance(key, slice):
end = min(len(self), key.stop or len(self))
end = max(0, end)
start = max(0, key.start or 0)
end = min(end, start + len(value))
for idx in range(start, end):
self.__setitem__(idx, value[idx - start])
return
if isinstance(value, FITS_record):
for idx in range(self._nfields):
self.field(self.names[idx])[key] = value.field(self.names[idx])
elif isinstance(value, (tuple, list, np.void)):
if self._nfields == len(value):
for idx in range(self._nfields):
self.field(idx)[key] = value[idx]
else:
raise ValueError(
f"Input tuple or list required to have {self._nfields} elements."
)
else:
raise TypeError(
"Assignment requires a FITS_record, tuple, or list as input."
)
def _ipython_key_completions_(self):
return self.names
def copy(self, order="C"):
"""
The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to
`numpy.copy`. Differences include that it re-views the copied array as
self's ndarray subclass, as though it were taking a slice; this means
``__array_finalize__`` is called and the copy shares all the array
attributes (including ``._converted``!). So we need to make a deep
copy of all those attributes so that the two arrays truly do not share
any data.
"""
new = super().copy(order=order)
new.__dict__ = copy.deepcopy(self.__dict__)
return new
@property
def columns(self):
"""A user-visible accessor for the coldefs."""
return self._coldefs
@property
def _coldefs(self):
# This used to be a normal internal attribute, but it was changed to a
# property as a quick and transparent way to work around the reference
# leak bug fixed in https://github.com/astropy/astropy/pull/4539
#
# See the long comment in the Column.array property for more details
# on this. But in short, FITS_rec now has a ._col_weakrefs attribute
# which is a WeakSet of weakrefs to each Column in _coldefs.
#
# So whenever ._coldefs is set we also add each Column in the ColDefs
# to the weakrefs set. This is an easy way to find out if a Column has
# any references to it external to the FITS_rec (i.e. a user assigned a
# column to a variable). If the column is still in _col_weakrefs then
# there are other references to it external to this FITS_rec. We use
# that information in __del__ to save off copies of the array data
# for those columns to their Column.array property before our memory
# is freed.
return self.__dict__.get("_coldefs")
@_coldefs.setter
def _coldefs(self, cols):
self.__dict__["_coldefs"] = cols
if isinstance(cols, ColDefs):
for col in cols.columns:
self._col_weakrefs.add(col)
@_coldefs.deleter
def _coldefs(self):
try:
del self.__dict__["_coldefs"]
except KeyError as exc:
raise AttributeError(exc.args[0])
def __del__(self):
try:
del self._coldefs
if self.dtype.fields is not None:
for col in self._col_weakrefs:
if col.array is not None:
col.array = col.array.copy()
# See issues #4690 and #4912
except (AttributeError, TypeError): # pragma: no cover
pass
@property
def names(self):
"""List of column names."""
if self.dtype.fields:
return list(self.dtype.names)
elif getattr(self, "_coldefs", None) is not None:
return self._coldefs.names
else:
return None
@property
def formats(self):
"""List of column FITS formats."""
if getattr(self, "_coldefs", None) is not None:
return self._coldefs.formats
return None
@property
def _raw_itemsize(self):
"""
Returns the size of row items that would be written to the raw FITS
file, taking into account the possibility of unicode columns being
compactified.
Currently for internal use only.
"""
if _has_unicode_fields(self):
total_itemsize = 0
for field in self.dtype.fields.values():
itemsize = field[0].itemsize
if field[0].kind == "U":
itemsize = itemsize // 4
total_itemsize += itemsize
return total_itemsize
else:
# Just return the normal itemsize
return self.itemsize
def field(self, key):
"""
A view of a `Column`'s data as an array.
"""
# NOTE: The *column* index may not be the same as the field index in
# the recarray, if the column is a phantom column
column = self.columns[key]
name = column.name
format = column.format
if format.dtype.itemsize == 0:
warnings.warn(
"Field {!r} has a repeat count of 0 in its format code, "
"indicating an empty field.".format(key)
)
return np.array([], dtype=format.dtype)
# If field's base is a FITS_rec, we can run into trouble because it
# contains a reference to the ._coldefs object of the original data;
# this can lead to a circular reference; see ticket #49
base = self
while isinstance(base, FITS_rec) and isinstance(base.base, np.recarray):
base = base.base
# base could still be a FITS_rec in some cases, so take care to
# use rec.recarray.field to avoid a potential infinite
# recursion
field = _get_recarray_field(base, name)
if name not in self._converted:
recformat = format.recformat
# TODO: If we're now passing the column to these subroutines, do we
# really need to pass them the recformat?
if isinstance(recformat, _FormatP):
# for P format
converted = self._convert_p(column, field, recformat)
else:
# Handle all other column data types which are fixed-width
# fields
converted = self._convert_other(column, field, recformat)
# Note: Never assign values directly into the self._converted dict;
# always go through self._cache_field; this way self._converted is
# only used to store arrays that are not already direct views of
# our own data.
self._cache_field(name, converted)
return converted
return self._converted[name]
def _cache_field(self, name, field):
"""
Do not store fields in _converted if one of its bases is self,
or if it has a common base with self.
This results in a reference cycle that cannot be broken since
ndarrays do not participate in cyclic garbage collection.
"""
base = field
while True:
self_base = self
while True:
if self_base is base:
return
if getattr(self_base, "base", None) is not None:
self_base = self_base.base
else:
break
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self._converted[name] = field
def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value):
"""
Update how the data is formatted depending on changes to column
attributes initiated by the user through the `Column` interface.
Dispatches column attribute change notifications to individual methods
for each attribute ``_update_column_<attr>``
"""
method_name = f"_update_column_{attr}"
if hasattr(self, method_name):
# Right now this is so we can be lazy and not implement updaters
# for every attribute yet--some we may not need at all, TBD
getattr(self, method_name)(column, idx, old_value, new_value)
def _update_column_name(self, column, idx, old_name, name):
"""Update the dtype field names when a column name is changed."""
dtype = self.dtype
# Updating the names on the dtype should suffice
dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]
def _convert_x(self, field, recformat):
"""Convert a raw table column to a bit array as specified by the
FITS X format.
"""
dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_)
_unwrapx(field, dummy, recformat.repeat)
return dummy
def _convert_p(self, column, field, recformat):
"""Convert a raw table column of FITS P or Q format descriptors
to a VLA column with the array data returned from the heap.
"""
if column.dim:
vla_shape = tuple(
reversed(tuple(map(int, column.dim.strip("()").split(","))))
)
dummy = _VLF([None] * len(self), dtype=recformat.dtype)
raw_data = self._get_raw_data()
if raw_data is None:
raise OSError(
"Could not find heap data for the {!r} variable-length "
"array column.".format(column.name)
)
for idx in range(len(self)):
offset = field[idx, 1] + self._heapoffset
count = field[idx, 0]
if recformat.dtype == "a":
dt = np.dtype(recformat.dtype + str(1))
arr_len = count * dt.itemsize
da = raw_data[offset : offset + arr_len].view(dt)
da = np.char.array(da.view(dtype=dt), itemsize=count)
dummy[idx] = decode_ascii(da)
else:
dt = np.dtype(recformat.dtype)
arr_len = count * dt.itemsize
dummy[idx] = raw_data[offset : offset + arr_len].view(dt)
if column.dim and len(vla_shape) > 1:
# The VLA is reshaped consistently with TDIM instructions
if vla_shape[0] == 1:
dummy[idx] = dummy[idx].reshape(1, len(dummy[idx]))
else:
vla_dim = vla_shape[1:]
vla_first = int(len(dummy[idx]) / np.prod(vla_dim))
dummy[idx] = dummy[idx].reshape((vla_first,) + vla_dim)
dummy[idx].dtype = dummy[idx].dtype.newbyteorder(">")
# Each array in the field may now require additional
# scaling depending on the other scaling parameters
# TODO: The same scaling parameters apply to every
# array in the column so this is currently very slow; we
# really only need to check once whether any scaling will
# be necessary and skip this step if not
# TODO: Test that this works for X format; I don't think
# that it does--the recformat variable only applies to the P
# format not the X format
dummy[idx] = self._convert_other(column, dummy[idx], recformat)
return dummy
def _convert_ascii(self, column, field):
"""
Special handling for ASCII table columns to convert columns containing
numeric types to actual numeric arrays from the string representation.
"""
format = column.format
recformat = getattr(format, "recformat", ASCII2NUMPY[format[0]])
# if the string = TNULL, return ASCIITNULL
nullval = str(column.null).strip().encode("ascii")
if len(nullval) > format.width:
nullval = nullval[: format.width]
# Before using .replace make sure that any trailing bytes in each
# column are filled with spaces, and *not*, say, nulls; this causes
# functions like replace to potentially leave gibberish bytes in the
# array buffer.
dummy = np.char.ljust(field, format.width)
dummy = np.char.replace(dummy, encode_ascii("D"), encode_ascii("E"))
null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))
# Convert all fields equal to the TNULL value (nullval) to empty fields.
# TODO: These fields really should be converted to NaN or something else undefined.
# Currently they are converted to empty fields, which are then set to zero.
dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)
# always replace empty fields, see https://github.com/astropy/astropy/pull/5394
if nullval != b"":
dummy = np.where(np.char.strip(dummy) == b"", null_fill, dummy)
try:
dummy = np.array(dummy, dtype=recformat)
except ValueError as exc:
indx = self.names.index(column.name)
raise ValueError(
"{}; the header may be missing the necessary TNULL{} "
"keyword or the table contains invalid data".format(exc, indx + 1)
)
return dummy
def _convert_other(self, column, field, recformat):
"""Perform conversions on any other fixed-width column data types.
This may not perform any conversion at all if it's not necessary, in
which case the original column array is returned.
"""
if isinstance(recformat, _FormatX):
# special handling for the X format
return self._convert_x(field, recformat)
(
_str,
_bool,
_number,
_scale,
_zero,
bscale,
bzero,
dim,
) = self._get_scale_factors(column)
indx = self.names.index(column.name)
# ASCII table, convert strings to numbers
# TODO:
# For now, check that these are ASCII columns by checking the coldefs
# type; in the future all columns (for binary tables, ASCII tables, or
# otherwise) should "know" what type they are already and how to handle
# converting their data from FITS format to native format and vice
# versa...
if not _str and isinstance(self._coldefs, _AsciiColDefs):
field = self._convert_ascii(column, field)
# Test that the dimensions given in dim are sensible; otherwise
# display a warning and ignore them
if dim:
# See if the dimensions already match, if not, make sure the
# number of items will fit in the specified dimensions
if field.ndim > 1:
actual_shape = field.shape[1:]
if _str:
actual_shape = actual_shape + (field.itemsize,)
else:
actual_shape = field.shape[0]
if dim == actual_shape:
# The array already has the correct dimensions, so we
# ignore dim and don't convert
dim = None
else:
nitems = reduce(operator.mul, dim)
if _str:
actual_nitems = field.itemsize
elif (
len(field.shape) == 1
): # No repeat count in TFORMn, equivalent to 1
actual_nitems = 1
else:
actual_nitems = field.shape[1]
if nitems > actual_nitems and not isinstance(recformat, _FormatP):
warnings.warn(
"TDIM{} value {:d} does not fit with the size of "
"the array items ({:d}). TDIM{:d} will be ignored.".format(
indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1
)
)
dim = None
# further conversion for both ASCII and binary tables
# For now we've made columns responsible for *knowing* whether their
# data has been scaled, but we make the FITS_rec class responsible for
# actually doing the scaling
# TODO: This also needs to be fixed in the effort to make Columns
# responsible for scaling their arrays to/from FITS native values
if not column.ascii and column.format.p_format:
format_code = column.format.p_format
else:
# TODO: Rather than having this if/else it might be nice if the
# ColumnFormat class had an attribute guaranteed to give the format
# of actual values in a column regardless of whether the true
# format is something like P or Q
format_code = column.format.format
if _number and (_scale or _zero) and not column._physical_values:
# This is to handle pseudo unsigned ints in table columns
# TODO: For now this only really works correctly for binary tables
# Should it work for ASCII tables as well?
if self._uint:
if bzero == 2**15 and format_code == "I":
field = np.array(field, dtype=np.uint16)
elif bzero == 2**31 and format_code == "J":
field = np.array(field, dtype=np.uint32)
elif bzero == 2**63 and format_code == "K":
field = np.array(field, dtype=np.uint64)
bzero64 = np.uint64(2**63)
else:
field = np.array(field, dtype=np.float64)
else:
field = np.array(field, dtype=np.float64)
if _scale:
np.multiply(field, bscale, field)
if _zero:
if self._uint and format_code == "K":
# There is a chance of overflow, so be careful
test_overflow = field.copy()
try:
test_overflow += bzero64
except OverflowError:
warnings.warn(
"Overflow detected while applying TZERO{:d}. "
"Returning unscaled data.".format(indx + 1)
)
else:
field = test_overflow
else:
field += bzero
# mark the column as scaled
column._physical_values = True
elif _bool and field.dtype != bool:
field = np.equal(field, ord("T"))
elif _str:
if not self._character_as_bytes:
with suppress(UnicodeDecodeError):
field = decode_ascii(field)
if dim and not isinstance(recformat, _FormatP):
# Apply the new field item dimensions
nitems = reduce(operator.mul, dim)
if field.ndim > 1:
field = field[:, :nitems]
if _str:
fmt = field.dtype.char
dtype = (f"|{fmt}{dim[-1]}", dim[:-1])
field.dtype = dtype
else:
field.shape = (field.shape[0],) + dim
return field
def _get_heap_data(self):
"""
Returns a pointer into the table's raw data to its heap (if present).
This is returned as a numpy byte array.
"""
if self._heapsize:
raw_data = self._get_raw_data().view(np.ubyte)
heap_end = self._heapoffset + self._heapsize
return raw_data[self._heapoffset : heap_end]
else:
return np.array([], dtype=np.ubyte)
def _get_raw_data(self):
"""
Returns the base array of self, the "raw data array": the array in the
format in which it was first read from a file, before it was sliced or
viewed as a different type in any way.
This is determined by walking through the bases until finding one that
has at least the same number of bytes as self, plus the heapsize. This
may be the immediate .base but is not always. This is used primarily
for variable-length array support which needs to be able to find the
heap (the raw data *may* be larger than nbytes + heapsize if it
contains a gap or padding).
May return ``None`` if no array resembling the "raw data" according to
the stated criteria can be found.
"""
raw_data_bytes = self.nbytes + self._heapsize
base = self
while hasattr(base, "base") and base.base is not None:
base = base.base
# Variable-length-arrays: should take into account the case of
# empty arrays
if hasattr(base, "_heapoffset"):
if hasattr(base, "nbytes") and base.nbytes > raw_data_bytes:
return base
# non variable-length-arrays
else:
if hasattr(base, "nbytes") and base.nbytes >= raw_data_bytes:
return base
def _get_scale_factors(self, column):
"""Get all the scaling flags and factors for one column."""
# TODO: Maybe this should be a method/property on Column? Or maybe
# it's not really needed at all...
_str = column.format.format == "A"
_bool = column.format.format == "L"
_number = not (_bool or _str)
bscale = column.bscale
bzero = column.bzero
_scale = bscale not in ("", None, 1)
_zero = bzero not in ("", None, 0)
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
# column._dims gives a tuple, rather than column.dim which returns the
# original string format code from the FITS header...
dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
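# The flags returned above drive the standard FITS linear scaling,
#     physical = BSCALE * stored + BZERO,
# and its inverse in _scale_back(). The pseudo-unsigned-integer convention
# handled in _convert_other() is a special case: an unsigned 16-bit column,
# for example, is stored as signed 16-bit integers with TSCALn = 1 and
# TZEROn = 32768 (2**15).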
def _scale_back(self, update_heap_pointers=True):
"""
Update the parent array, using the (latest) scaled array.
If ``update_heap_pointers`` is `False`, this will leave all the heap
pointers in P/Q columns as they are verbatim--it only makes sense to do
this if there is already data on the heap and it can be guaranteed that
that data has not been modified, and there is not new data to add to
the heap. Currently this is only used as an optimization for
CompImageHDU that does its own handling of the heap.
"""
# Running total for the new heap size
heapsize = 0
for indx, name in enumerate(self.dtype.names):
column = self._coldefs[indx]
recformat = column.format.recformat
raw_field = _get_recarray_field(self, indx)
# add the location offset of the heap area for each
# variable length column
if isinstance(recformat, _FormatP):
# Irritatingly, this can return a different dtype than just
# doing np.dtype(recformat.dtype); but this returns the results
# that we want. For example if recformat.dtype is 'a' we want
# an array of characters.
dtype = np.array([], dtype=recformat.dtype).dtype
if update_heap_pointers and name in self._converted:
# The VLA has potentially been updated, so we need to
# update the array descriptors
raw_field[:] = 0 # reset
npts = [np.prod(arr.shape) for arr in self._converted[name]]
raw_field[: len(npts), 0] = npts
raw_field[1:, 1] = (
np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize
)
raw_field[:, 1][:] += heapsize
heapsize += raw_field[:, 0].sum() * dtype.itemsize
# Even if this VLA has not been read or updated, we need to
# include the size of its constituent arrays in the heap size
# total
if heapsize >= 2**31:
raise ValueError(
"The heapsize limit for 'P' format "
"has been reached. "
"Please consider using the 'Q' format "
"for your file."
)
if isinstance(recformat, _FormatX) and name in self._converted:
_wrapx(self._converted[name], raw_field, recformat.repeat)
continue
(
_str,
_bool,
_number,
_scale,
_zero,
bscale,
bzero,
_,
) = self._get_scale_factors(column)
field = self._converted.get(name, raw_field)
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero) and column._physical_values:
dummy = field.copy()
if _zero:
dummy -= bzero
if _scale:
dummy /= bscale
# This will set the raw values in the recarray back to
# their non-physical storage values, so the column should
# be marked as not scaled
column._physical_values = False
elif _str or isinstance(self._coldefs, _AsciiColDefs):
dummy = field
else:
continue
# ASCII table, convert numbers to strings
if isinstance(self._coldefs, _AsciiColDefs):
self._scale_back_ascii(indx, dummy, raw_field)
# binary table string column
elif isinstance(raw_field, chararray.chararray):
self._scale_back_strings(indx, dummy, raw_field)
# all other binary table columns
else:
if len(raw_field) and isinstance(raw_field[0], np.integer):
dummy = np.around(dummy)
if raw_field.shape == dummy.shape:
raw_field[:] = dummy
else:
# Reshaping the data is necessary in cases where the
# TDIMn keyword was used to shape a column's entries
# into arrays
raw_field[:] = dummy.ravel().view(raw_field.dtype)
del dummy
# ASCII table does not have Boolean type
elif _bool and name in self._converted:
choices = (
np.array([ord("F")], dtype=np.int8)[0],
np.array([ord("T")], dtype=np.int8)[0],
)
raw_field[:] = np.choose(field, choices)
# Store the updated heapsize
self._heapsize = heapsize
def _scale_back_strings(self, col_idx, input_field, output_field):
# There are a few possibilities this has to be able to handle properly
# The input_field, which comes from the _converted column is of dtype
# 'Un' so that elements read out of the array are normal str
# objects (i.e. unicode strings)
#
# At the other end the *output_field* may also be of type 'S' or of
# type 'U'. It will *usually* be of type 'S' because when reading
# an existing FITS table the raw data is just ASCII strings, and
# represented in Numpy as an S array. However, when a user creates
# a new table from scratch, they *might* pass in a column containing
# unicode strings (dtype 'U'). Therefore the output_field of the
# raw array is actually a unicode array. But we still want to make
# sure the data is encodable as ASCII. Later when we write out the
# array we use, in the dtype 'U' case, a different write routine
# that writes row by row and encodes any 'U' columns to ASCII.
# If the output_field is non-ASCII we will worry about ASCII encoding
# later when writing; otherwise we can do it right here
if input_field.dtype.kind == "U" and output_field.dtype.kind == "S":
try:
_ascii_encode(input_field, out=output_field)
except _UnicodeArrayEncodeError as exc:
raise ValueError(
"Could not save column '{}': Contains characters that "
"cannot be encoded as ASCII as required by FITS, starting "
"at the index {!r} of the column, and the index {} of "
"the string at that location.".format(
self._coldefs[col_idx].name,
exc.index[0] if len(exc.index) == 1 else exc.index,
exc.start,
)
)
else:
# Otherwise go ahead and do a direct copy into--if both are type
# 'U' we'll handle encoding later
input_field = input_field.flatten().view(output_field.dtype)
output_field.flat[:] = input_field
# Ensure that blanks at the end of each string are
# converted to nulls instead of spaces, see Trac #15
# and #111
_rstrip_inplace(output_field)
def _scale_back_ascii(self, col_idx, input_field, output_field):
"""
Convert internal array values back to ASCII table representation.
The ``input_field`` is the internal representation of the values, and
the ``output_field`` is the character array representing the ASCII
output that will be written.
"""
starts = self._coldefs.starts[:]
spans = self._coldefs.spans
format = self._coldefs[col_idx].format
# The index of the "end" column of the record, beyond
# which we can't write
end = super().field(-1).itemsize
starts.append(end + starts[-1])
if col_idx > 0:
lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]
else:
lead = 0
if lead < 0:
warnings.warn(
f"Column {col_idx + 1} starting point overlaps the previous column."
)
trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]
if trail < 0:
warnings.warn(
f"Column {col_idx + 1} ending point overlaps the next column."
)
# TODO: It would be nice if these string column formatting
# details were left to a specialized class, as is the case
# with FormatX and FormatP
if "A" in format:
_pc = "{:"
else:
_pc = "{:>"
fmt = "".join([_pc, format[1:], ASCII2STR[format[0]], "}", (" " * trail)])
# Even if the format precision is 0, we should output a decimal point
# as long as there is space to do so--not including a decimal point in
# a float value is discouraged by the FITS Standard
trailing_decimal = format.precision == 0 and format.format in ("F", "E", "D")
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for jdx, value in enumerate(input_field):
value = fmt.format(value)
if len(value) > starts[col_idx + 1] - starts[col_idx]:
raise ValueError(
"Value {!r} does not fit into the output's itemsize of {}.".format(
value, spans[col_idx]
)
)
if trailing_decimal and value[0] == " ":
# We have some extra space in the field for the trailing
# decimal point
value = value[1:] + "."
output_field[jdx] = value
# Replace exponent separator in floating point numbers
if "D" in format:
output_field[:] = output_field.replace(b"E", b"D")
def tolist(self):
# Override .tolist to take care of special case of VLF
column_lists = [self[name].tolist() for name in self.columns.names]
return [list(row) for row in zip(*column_lists)]
def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if field.dtype.char in ("S", "U") and not isinstance(field, chararray.chararray):
field = field.view(chararray.chararray)
return field
class _UnicodeArrayEncodeError(UnicodeEncodeError):
def __init__(self, encoding, object_, start, end, reason, index):
super().__init__(encoding, object_, start, end, reason)
self.index = index
def _ascii_encode(inarray, out=None):
"""
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
    This is like an in-place version of `np.char.encode`, though simpler since
    it is limited to ASCII, and hence the size of each character is
    guaranteed to be 1 byte.
    If any strings are non-ASCII, a `_UnicodeArrayEncodeError` is raised--this
    is just a `UnicodeEncodeError` with an additional attribute giving the
    index of the item that couldn't be encoded.
"""
out_dtype = np.dtype((f"S{inarray.dtype.itemsize // 4}", inarray.dtype.shape))
if out is not None:
out = out.view(out_dtype)
op_dtypes = [inarray.dtype, out_dtype]
op_flags = [["readonly"], ["writeonly", "allocate"]]
it = np.nditer(
[inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=["zerosize_ok"]
)
try:
for initem, outitem in it:
outitem[...] = initem.item().encode("ascii")
except UnicodeEncodeError as exc:
index = np.unravel_index(it.iterindex, inarray.shape)
raise _UnicodeArrayEncodeError(*(exc.args + (index,)))
return it.operands[1]
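# A minimal illustrative sketch of the helper above (the arrays are
# hypothetical); a non-ASCII element raises _UnicodeArrayEncodeError, whose
# ``index`` attribute points at the offending row:
#
#     >>> arr = np.array(["abc", "de"], dtype="U3")
#     >>> out = np.empty(2, dtype="S3")
#     >>> _ascii_encode(arr, out=out)
#     array([b'abc', b'de'], dtype='|S3')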
def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == "U" for d in dtypes)
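# Illustrative sketch (hypothetical structured arrays):
#
#     >>> _has_unicode_fields(np.zeros(3, dtype=[("a", "U4"), ("b", "f8")]))
#     True
#     >>> _has_unicode_fields(np.zeros(3, dtype=[("a", "S4"), ("b", "f8")]))
#     False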
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, MaskedColumn
from astropy.table.column import col_copy
from astropy.time import Time, TimeDelta
from astropy.time.core import BARYCENTRIC_SCALES
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.utils.exceptions import AstropyUserWarning
from . import Card, Header
# The following is based on the FITS WCS Paper IV, "Representations of time
# coordinates in FITS".
# https://ui.adsabs.harvard.edu/abs/2015A%26A...574A..36R
# FITS WCS standard specified "4-3" form for non-linear coordinate types
TCTYP_RE_TYPE = re.compile(r"(?P<type>[A-Z]+)[-]+")
TCTYP_RE_ALGO = re.compile(r"(?P<algo>[A-Z]+)\s*")
# FITS Time standard specified time units
FITS_TIME_UNIT = ["s", "d", "a", "cy", "min", "h", "yr", "ta", "Ba"]
# Global time reference coordinate keywords
TIME_KEYWORDS = (
"TIMESYS",
"MJDREF",
"JDREF",
"DATEREF",
"TREFPOS",
"TREFDIR",
"TIMEUNIT",
"TIMEOFFS",
"OBSGEO-X",
"OBSGEO-Y",
"OBSGEO-Z",
"OBSGEO-L",
"OBSGEO-B",
"OBSGEO-H",
"DATE",
"DATE-OBS",
"DATE-AVG",
"DATE-BEG",
"DATE-END",
"MJD-OBS",
"MJD-AVG",
"MJD-BEG",
"MJD-END",
)
# Column-specific time override keywords
COLUMN_TIME_KEYWORDS = ("TCTYP", "TCUNI", "TRPOS")
# Column-specific keywords regex
COLUMN_TIME_KEYWORD_REGEXP = f"({'|'.join(COLUMN_TIME_KEYWORDS)})[0-9]+"
def is_time_column_keyword(keyword):
"""
Check if the FITS header keyword is a time column-specific keyword.
Parameters
----------
keyword : str
FITS keyword.
"""
return re.match(COLUMN_TIME_KEYWORD_REGEXP, keyword) is not None
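# Illustrative sketch: only the column-specific time keywords match, e.g.
#
#     >>> is_time_column_keyword("TCTYP2")
#     True
#     >>> is_time_column_keyword("TIMESYS")
#     False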
# Set astropy time global information
GLOBAL_TIME_INFO = {
"TIMESYS": ("UTC", "Default time scale"),
"JDREF": (0.0, "Time columns are jd = jd1 + jd2"),
"TREFPOS": ("TOPOCENTER", "Time reference position"),
}
def _verify_global_info(global_info):
"""
Given the global time reference frame information, verify that
each global time coordinate attribute will be given a valid value.
Parameters
----------
global_info : dict
Global time reference frame information.
"""
# Translate FITS deprecated scale into astropy scale, or else just convert
# to lower case for further checks.
global_info["scale"] = FITS_DEPRECATED_SCALES.get(
global_info["TIMESYS"], global_info["TIMESYS"].lower()
)
# Verify global time scale
if global_info["scale"] not in Time.SCALES:
# 'GPS' and 'LOCAL' are FITS recognized time scale values
# but are not supported by astropy.
if global_info["scale"] == "gps":
warnings.warn(
"Global time scale (TIMESYS) has a FITS recognized time scale "
'value "GPS". In Astropy, "GPS" is a time from epoch format '
"which runs synchronously with TAI; GPS is approximately 19 s "
"ahead of TAI. Hence, this format will be used.",
AstropyUserWarning,
)
# Assume that the values are in GPS format
global_info["scale"] = "tai"
global_info["format"] = "gps"
if global_info["scale"] == "local":
warnings.warn(
"Global time scale (TIMESYS) has a FITS recognized time scale "
'value "LOCAL". However, the standard states that "LOCAL" should be '
"tied to one of the existing scales because it is intrinsically "
"unreliable and/or ill-defined. Astropy will thus use the default "
'global time scale "UTC" instead of "LOCAL".',
AstropyUserWarning,
)
# Default scale 'UTC'
global_info["scale"] = "utc"
global_info["format"] = None
else:
raise AssertionError(
"Global time scale (TIMESYS) should have a FITS recognized "
"time scale value (got {!r}). The FITS standard states that "
"the use of local time scales should be restricted to alternate "
"coordinates.".format(global_info["TIMESYS"])
)
else:
# Scale is already set
global_info["format"] = None
# Check if geocentric global location is specified
obs_geo = [
global_info[attr]
for attr in ("OBSGEO-X", "OBSGEO-Y", "OBSGEO-Z")
if attr in global_info
]
# Location full specification is (X, Y, Z)
if len(obs_geo) == 3:
global_info["location"] = EarthLocation.from_geocentric(*obs_geo, unit=u.m)
else:
# Check if geodetic global location is specified (since geocentric failed)
# First warn the user if geocentric location is partially specified
if obs_geo:
warnings.warn(
"The geocentric observatory location {} is not completely "
"specified (X, Y, Z) and will be ignored.".format(obs_geo),
AstropyUserWarning,
)
# Check geodetic location
obs_geo = [
global_info[attr]
for attr in ("OBSGEO-L", "OBSGEO-B", "OBSGEO-H")
if attr in global_info
]
if len(obs_geo) == 3:
global_info["location"] = EarthLocation.from_geodetic(*obs_geo)
else:
# Since both geocentric and geodetic locations are not specified,
# location will be None.
# Warn the user if geodetic location is partially specified
if obs_geo:
warnings.warn(
"The geodetic observatory location {} is not completely "
"specified (lon, lat, alt) and will be ignored.".format(obs_geo),
AstropyUserWarning,
)
global_info["location"] = None
# Get global time reference
# Keywords are listed in order of precedence, as stated by the standard
for key, format_ in (("MJDREF", "mjd"), ("JDREF", "jd"), ("DATEREF", "fits")):
if key in global_info:
global_info["ref_time"] = {"val": global_info[key], "format": format_}
break
else:
# If none of the three keywords is present, MJDREF = 0.0 must be assumed
global_info["ref_time"] = {"val": 0, "format": "mjd"}
def _verify_column_info(column_info, global_info):
"""
Given the column-specific time reference frame information, verify that
each column-specific time coordinate attribute has a valid value.
Return True if the coordinate column is time, or else return False.
Parameters
----------
global_info : dict
Global time reference frame information.
column_info : dict
Column-specific time reference frame override information.
"""
scale = column_info.get("TCTYP", None)
unit = column_info.get("TCUNI", None)
location = column_info.get("TRPOS", None)
if scale is not None:
# Non-linear coordinate types have "4-3" form and are not time coordinates
if TCTYP_RE_TYPE.match(scale[:5]) and TCTYP_RE_ALGO.match(scale[5:]):
return False
elif scale.lower() in Time.SCALES:
column_info["scale"] = scale.lower()
column_info["format"] = None
elif scale in FITS_DEPRECATED_SCALES.keys():
column_info["scale"] = FITS_DEPRECATED_SCALES[scale]
column_info["format"] = None
# TCTYPn (scale) = 'TIME' indicates that the column scale is
# controlled by the global scale.
elif scale == "TIME":
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
elif scale == "GPS":
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "GPS". '
'In Astropy, "GPS" is a time from epoch format which runs '
"synchronously with TAI; GPS runs ahead of TAI approximately "
"by 19 s. Hence, this format will be used.".format(column_info),
AstropyUserWarning,
)
column_info["scale"] = "tai"
column_info["format"] = "gps"
elif scale == "LOCAL":
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "LOCAL". '
'However, the standard states that "LOCAL" should be tied to one '
"of the existing scales because it is intrinsically unreliable "
"and/or ill-defined. Astropy will thus use the global time scale "
"(TIMESYS) as the default.".format(column_info),
AstropyUserWarning,
)
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
else:
# Coordinate type is either an unrecognized local time scale
# or a linear coordinate type
return False
# If TCUNIn is a time unit or TRPOSn is specified, the column is a time
# coordinate. This has to be tested since TCTYP (scale) is not specified.
elif (unit is not None and unit in FITS_TIME_UNIT) or location is not None:
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
# None of the conditions for time coordinate columns is satisfied
else:
return False
# Check if column-specific reference position TRPOSn is specified
if location is not None:
# Observatory position (location) needs to be specified only
# for 'TOPOCENTER'.
if location == "TOPOCENTER":
column_info["location"] = global_info["location"]
if column_info["location"] is None:
warnings.warn(
'Time column reference position "TRPOSn" value is "TOPOCENTER". '
"However, the observatory position is not properly specified. "
"The FITS standard does not support this and hence reference "
"position will be ignored.",
AstropyUserWarning,
)
else:
column_info["location"] = None
# Warn user about ignoring global reference position when TRPOSn is
# not specified
elif global_info["TREFPOS"] == "TOPOCENTER":
if global_info["location"] is not None:
warnings.warn(
'Time column reference position "TRPOSn" is not specified. The '
'default value for it is "TOPOCENTER", and the observatory position '
"has been specified. However, for supporting column-specific location, "
"reference position will be ignored for this column.",
AstropyUserWarning,
)
column_info["location"] = None
else:
column_info["location"] = None
# Get reference time
column_info["ref_time"] = global_info["ref_time"]
return True
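# Illustrative sketch (hypothetical dictionaries): a column carrying
# TCTYPn = 'TT' is accepted as a time column and inherits the global
# reference time, e.g.
#
#     >>> ginfo = {"scale": "utc", "format": None, "location": None,
#     ...          "TREFPOS": "TOPOCENTER", "ref_time": {"val": 0, "format": "mjd"}}
#     >>> cinfo = {"TCTYP": "TT"}
#     >>> _verify_column_info(cinfo, ginfo)
#     True
#     >>> cinfo["scale"]
#     'tt'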
def _get_info_if_time_column(col, global_info):
"""
Check if a column without corresponding time column keywords in the
FITS header represents time or not. If yes, return the time column
information needed for its conversion to Time.
    This is only applicable to the special case where a column has the
    name 'TIME' and a time unit.
"""
# Column with TTYPEn = 'TIME' and lacking any TC*n or time
# specific keywords will be controlled by the global keywords.
if col.info.name.upper() == "TIME" and col.info.unit in FITS_TIME_UNIT:
column_info = {
"scale": global_info["scale"],
"format": global_info["format"],
"ref_time": global_info["ref_time"],
"location": None,
}
if global_info["TREFPOS"] == "TOPOCENTER":
column_info["location"] = global_info["location"]
if column_info["location"] is None:
warnings.warn(
'Time column "{}" reference position will be ignored '
"due to unspecified observatory position.".format(col.info.name),
AstropyUserWarning,
)
return column_info
return None
def _convert_global_time(table, global_info):
"""
Convert the table metadata for time informational keywords
to astropy Time.
Parameters
----------
table : `~astropy.table.Table`
The table whose time metadata is to be converted.
global_info : dict
Global time reference frame information.
"""
# Read in Global Informational keywords as Time
for key, value in global_info.items():
# FITS uses a subset of ISO-8601 for DATE-xxx
if key not in table.meta:
try:
table.meta[key] = _convert_time_key(global_info, key)
except ValueError:
pass
def _convert_time_key(global_info, key):
"""
Convert a time metadata key to a Time object.
Parameters
----------
global_info : dict
Global time reference frame information.
key : str
Time key.
Returns
-------
astropy.time.Time
Raises
------
ValueError
If key is not a valid global time keyword.
"""
value = global_info[key]
if key.startswith("DATE"):
scale = "utc" if key == "DATE" else global_info["scale"]
precision = len(value.split(".")[-1]) if "." in value else 0
return Time(value, format="fits", scale=scale, precision=precision)
# MJD-xxx in MJD according to TIMESYS
elif key.startswith("MJD-"):
return Time(value, format="mjd", scale=global_info["scale"])
else:
raise ValueError("Key is not a valid global time keyword")
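# Illustrative sketch (hypothetical values): DATE is always read as UTC,
# while DATE-xxx and MJD-xxx keywords follow the global TIMESYS scale, e.g.
#
#     >>> ginfo = {"scale": "tt", "DATE": "2001-01-02T12:34:56"}
#     >>> _convert_time_key(ginfo, "DATE").scale
#     'utc'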
def _convert_time_column(col, column_info):
"""
Convert time columns to astropy Time columns.
Parameters
----------
col : `~astropy.table.Column`
The time coordinate column to be converted to Time.
column_info : dict
Column-specific time reference frame override information.
"""
# The code might fail while attempting to read FITS files not written by astropy.
try:
# ISO-8601 is the only string representation of time in FITS
if col.info.dtype.kind in ["S", "U"]:
# [+/-C]CCYY-MM-DD[Thh:mm:ss[.s...]] where the number of characters
# from index 20 to the end of string represents the precision
precision = max(int(col.info.dtype.str[2:]) - 20, 0)
return Time(
col,
format="fits",
scale=column_info["scale"],
precision=precision,
location=column_info["location"],
)
if column_info["format"] == "gps":
return Time(col, format="gps", location=column_info["location"])
# If reference value is 0 for JD or MJD, the column values can be
# directly converted to Time, as they are absolute (relative
# to a globally accepted zero point).
if column_info["ref_time"]["val"] == 0 and column_info["ref_time"][
"format"
] in ["jd", "mjd"]:
# (jd1, jd2) where jd = jd1 + jd2
if col.shape[-1] == 2 and col.ndim > 1:
return Time(
col[..., 0],
col[..., 1],
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
else:
return Time(
col,
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
# Reference time
ref_time = Time(
column_info["ref_time"]["val"],
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
# Elapsed time since reference time
if col.shape[-1] == 2 and col.ndim > 1:
delta_time = TimeDelta(col[..., 0], col[..., 1])
else:
delta_time = TimeDelta(col)
return ref_time + delta_time
except Exception as err:
warnings.warn(
'The exception "{}" was encountered while trying to convert the time '
'column "{}" to Astropy Time.'.format(err, col.info.name),
AstropyUserWarning,
)
return col
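# Illustrative sketch (hypothetical column and info dictionary): a (jd1, jd2)
# pair column with a zero MJD reference maps directly onto Time, e.g.
#
#     >>> col = Column([[59000.0, 0.25]], name="obs_time")
#     >>> cinfo = {"scale": "utc", "format": None, "location": None,
#     ...          "ref_time": {"val": 0, "format": "mjd"}}
#     >>> _convert_time_column(col, cinfo).mjd
#     array([59000.25])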
def fits_to_time(hdr, table):
"""
Read FITS binary table time columns as `~astropy.time.Time`.
This method reads the metadata associated with time coordinates, as
stored in a FITS binary table header, converts time columns into
`~astropy.time.Time` columns and reads global reference times as
`~astropy.time.Time` instances.
Parameters
----------
hdr : `~astropy.io.fits.header.Header`
FITS Header
table : `~astropy.table.Table`
The table whose time columns are to be read as Time
Returns
-------
hdr : `~astropy.io.fits.header.Header`
Modified FITS Header (time metadata removed)
"""
# Set defaults for global time scale, reference, etc.
global_info = {"TIMESYS": "UTC", "TREFPOS": "TOPOCENTER"}
# Set default dictionary for time columns
time_columns = defaultdict(OrderedDict)
# Make a "copy" (not just a view) of the input header, since it
# may get modified. the data is still a "view" (for now)
hcopy = hdr.copy(strip=True)
# Scan the header for global and column-specific time keywords
for key, value, comment in hdr.cards:
if key in TIME_KEYWORDS:
global_info[key] = value
hcopy.remove(key)
elif is_time_column_keyword(key):
base, idx = re.match(r"([A-Z]+)([0-9]+)", key).groups()
time_columns[int(idx)][base] = value
hcopy.remove(key)
elif value in ("OBSGEO-X", "OBSGEO-Y", "OBSGEO-Z") and re.match(
"TTYPE[0-9]+", key
):
global_info[value] = table[value]
# Verify and get the global time reference frame information
_verify_global_info(global_info)
_convert_global_time(table, global_info)
# Columns with column-specific time (coordinate) keywords
if time_columns:
for idx, column_info in time_columns.items():
# Check if the column is time coordinate (not spatial)
if _verify_column_info(column_info, global_info):
colname = table.colnames[idx - 1]
# Convert to Time
table[colname] = _convert_time_column(table[colname], column_info)
# Check for special-cases of time coordinate columns
for idx, colname in enumerate(table.colnames):
if (idx + 1) not in time_columns:
column_info = _get_info_if_time_column(table[colname], global_info)
if column_info:
table[colname] = _convert_time_column(table[colname], column_info)
return hcopy
def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
    high precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
# Make a light copy of table (to the extent possible) and clear any indices along
# the way. Indices are not serialized and cause problems later, but they are not
# needed here so just drop. For Column subclasses take advantage of copy() method,
# but for others it is required to actually copy the data if there are attached
# indices. See #8077 and #9009 for further discussion.
new_cols = []
for col in table.itercols():
if isinstance(col, Column):
new_col = col.copy(copy_data=False) # Also drops any indices
else:
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
newtable = table.__class__(new_cols, copy=False)
newtable.meta = table.meta
# Global time coordinate frame keywords
hdr = Header(
[
Card(keyword=key, value=val[0], comment=val[1])
for key, val in GLOBAL_TIME_INFO.items()
]
)
# Store coordinate column-specific metadata
newtable.meta["__coordinate_columns__"] = defaultdict(OrderedDict)
coord_meta = newtable.meta["__coordinate_columns__"]
time_cols = table.columns.isinstance(Time)
# Geocentric location
location = None
for col in time_cols:
# By default, Time objects are written in full precision, i.e. we store both
# jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for
# Time can be stored if the user explicitly chooses to do so.
col_cls = MaskedColumn if col.masked else Column
if col.info.serialize_method["fits"] == "formatted_value":
newtable.replace_column(col.info.name, col_cls(col.value))
continue
# The following is necessary to deal with multi-dimensional ``Time`` objects
# (i.e. where Time.shape is non-trivial).
jd12 = np.stack([col.jd1, col.jd2], axis=-1)
        # Stacking along the last axis means each time element maps to an
        # innermost (jd1, jd2) pair in the replacement column.
newtable.replace_column(col.info.name, col_cls(jd12, unit="d"))
# Time column-specific override keywords
coord_meta[col.info.name]["coord_type"] = col.scale.upper()
coord_meta[col.info.name]["coord_unit"] = "d"
# Time column reference position
if col.location is None:
coord_meta[col.info.name]["time_ref_pos"] = None
if location is not None:
warnings.warn(
'Time Column "{}" has no specified location, but global Time '
"Position is present, which will be the default for this column "
"in FITS specification.".format(col.info.name),
AstropyUserWarning,
)
else:
coord_meta[col.info.name]["time_ref_pos"] = "TOPOCENTER"
# Compatibility of Time Scales and Reference Positions
if col.scale in BARYCENTRIC_SCALES:
warnings.warn(
'Earth Location "TOPOCENTER" for Time Column "{}" is incompatible '
'with scale "{}".'.format(col.info.name, col.scale.upper()),
AstropyUserWarning,
)
if location is None:
# Set global geocentric location
location = col.location
if location.size > 1:
for dim in ("x", "y", "z"):
newtable.add_column(
Column(getattr(location, dim).to_value(u.m)),
name=f"OBSGEO-{dim.upper()}",
)
else:
hdr.extend(
[
Card(
keyword=f"OBSGEO-{dim.upper()}",
value=getattr(location, dim).to_value(u.m),
)
for dim in ("x", "y", "z")
]
)
elif np.any(location != col.location):
raise ValueError(
"Multiple Time Columns with different geocentric "
"observatory locations ({}, {}) encountered."
"This is not supported by the FITS standard.".format(
location, col.location
)
)
return newtable, hdr
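# Illustrative sketch (a minimal, hypothetical table): each Time column
# becomes an (N, 2) float column of (jd1, jd2) values in days, and the
# returned header carries the global time keywords, e.g.
#
#     >>> from astropy.table import Table
#     >>> t = Table({"obs": Time([59000.0, 59001.0], format="mjd", scale="tt")})
#     >>> newt, hdr = time_to_fits(t)
#     >>> newt["obs"].shape, hdr["TIMESYS"]
#     ((2, 2), 'UTC')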
# Licensed under a 3-clause BSD style license - see PYFITS.rst
"""Convenience functions for working with FITS files.
Convenience functions
=====================
The functions in this module provide shortcuts for some of the most basic
operations on FITS files, such as reading and updating the header. They are
included directly in the 'astropy.io.fits' namespace so that they can be used
like::
astropy.io.fits.getheader(...)
These functions are primarily for convenience when working with FITS files in
the command-line interpreter. If performing several operations on the same
file, such as in a script, it is better to *not* use these functions, as each
one must open and re-parse the file. In such cases it is better to use
:func:`astropy.io.fits.open` and work directly with the
:class:`astropy.io.fits.HDUList` object and underlying HDU objects.
Several of the convenience functions, such as `getheader` and `getdata`, support
special arguments for selecting which HDU to use when working with a
multi-extension FITS file. There are a few supported argument formats for
selecting the HDU. See the documentation for `getdata` for an
explanation of all the different formats.
.. warning::
All arguments to convenience functions other than the filename that are
*not* for selecting the HDU should be passed in as keyword
arguments. This is to avoid ambiguity and conflicts with the
HDU arguments. For example, to set NAXIS=1 on the Primary HDU:
Wrong::
astropy.io.fits.setval('myimage.fits', 'NAXIS', 1)
The above example will try to set the NAXIS value on the first extension
HDU to blank. That is, the argument '1' is assumed to specify an
HDU.
Right::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1)
This will set the NAXIS keyword to 1 on the primary HDU (the default). To
specify the first extension HDU use::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1)
This complexity arises out of the attempt to simultaneously support
multiple argument formats that were used in past versions of PyFITS.
Unfortunately, it is not possible to support all formats without
introducing some ambiguity. A future Astropy release may standardize
around a single format and officially deprecate the other formats.
"""
import operator
import os
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .diff import FITSDiff, HDUDiff
from .file import FILE_MODES, _File
from .hdu.base import _BaseHDU, _ValidHDU
from .hdu.hdulist import HDUList, fitsopen
from .hdu.image import ImageHDU, PrimaryHDU
from .hdu.table import BinTableHDU
from .header import Header
from .util import (
_is_dask_array,
_is_int,
fileobj_closed,
fileobj_mode,
fileobj_name,
path_like,
)
__all__ = [
"getheader",
"getdata",
"getval",
"setval",
"delval",
"writeto",
"append",
"update",
"info",
"tabledump",
"tableload",
"table_to_hdu",
"printdiff",
]
def getheader(filename, *args, **kwargs):
"""
Get the header from an HDU of a FITS file.
Parameters
----------
filename : path-like or file-like
File to get header from. If an opened file object, its mode
        must be one of the following: rb, rb+, or ab+.
ext, extname, extver
The rest of the arguments are for HDU specification. See the
`getdata` documentation for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
header = hdu.header
finally:
hdulist.close(closed=closed)
return header
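# Illustrative usage sketch (the filenames are hypothetical):
#
#     >>> hdr = getheader("image.fits")            # primary HDU header
#     >>> hdr = getheader("image.fits", "SCI", 2)  # EXTNAME='SCI', EXTVER=2
#     >>> hdr = getheader("image.fits", ext=4)     # HDU index 4 (primary is 0)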
def getdata(filename, *args, header=None, lower=None, upper=None, view=None, **kwargs):
"""
Get the data from an HDU of a FITS file (and optionally the
header).
Parameters
----------
filename : path-like or file-like
File to get data from. If opened, mode must be one of the
        following: rb, rb+, or ab+.
ext
The rest of the arguments are for HDU specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary HDU::
getdata('in.fits')
.. note::
Exclusive to ``getdata``: if ``ext`` is not specified
and primary header contains no data, ``getdata`` attempts
to retrieve data from first extension HDU.
By HDU number::
getdata('in.fits', 0) # the primary HDU
getdata('in.fits', 2) # the second extension HDU
getdata('in.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
Note ``EXTNAME`` values are not case sensitive
        By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : ndarray or `~numpy.recarray` or `~astropy.io.fits.Group`
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
Raises
------
IndexError
If no data is found in searched HDUs.
"""
mode, closed = _get_file_mode(filename)
ext = kwargs.get("ext")
extname = kwargs.get("extname")
extver = kwargs.get("extver")
ext_given = not (
len(args) == 0 and ext is None and extname is None and extver is None
)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
data = hdu.data
if data is None:
if ext_given:
raise IndexError(f"No data in HDU #{extidx}.")
# fallback to the first extension HDU
if len(hdulist) == 1:
raise IndexError("No data in Primary HDU and no extension HDU found.")
hdu = hdulist[1]
data = hdu.data
if data is None:
raise IndexError("No data in either Primary or first extension HDUs.")
if header:
hdr = hdu.header
finally:
hdulist.close(closed=closed)
# Change case of names if requested
trans = None
if lower:
trans = operator.methodcaller("lower")
elif upper:
trans = operator.methodcaller("upper")
if trans:
if data.dtype.names is None:
# this data does not have fields
return
if data.dtype.descr[0][0] == "":
# this data does not have fields
return
data.dtype.names = [trans(n) for n in data.dtype.names]
# allow different views into the underlying ndarray. Keep the original
# view just in case there is a problem
if isinstance(view, type) and issubclass(view, np.ndarray):
data = data.view(view)
if header:
return data, hdr
else:
return data
def getval(filename, keyword, *args, **kwargs):
"""
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object (if opened, mode must be
        one of the following: rb, rb+, or ab+).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
Returns
-------
keyword value : str, int, or float
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
hdr = getheader(filename, *args, **kwargs)
return hdr[keyword]
def setval(
filename,
keyword,
*args,
value=None,
comment=None,
before=None,
after=None,
savecomment=False,
**kwargs,
):
"""
Set a keyword's value from a header in a FITS file.
    If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : path-like or file-like
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
argument ``savecomment`` takes precedence over ``comment`` if both
specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, "update", *args, **kwargs)
try:
if keyword in hdulist[extidx].header and savecomment:
comment = None
hdulist[extidx].header.set(keyword, value, comment, before, after)
finally:
hdulist.close(closed=closed)
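# Illustrative usage sketch ('myimage.fits' is a hypothetical file):
#
#     >>> setval("myimage.fits", "OBSERVER", value="E. Hubble",
#     ...        comment="observer name", ext=0)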
def delval(filename, keyword, *args, **kwargs):
"""
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, "update", *args, **kwargs)
try:
del hdulist[extidx].header[keyword]
finally:
hdulist.close(closed=closed)
def writeto(
filename,
data,
header=None,
output_verify="exception",
overwrite=False,
checksum=False,
):
"""
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened in a writable binary
mode such as 'wb' or 'ab+'.
data : array or `~numpy.recarray` or `~astropy.io.fits.Group`
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
        ``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See
:ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
        headers of all HDUs written to the file.
"""
hdu = _makehdu(data, header)
if hdu.is_image and not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
hdu.writeto(
filename, overwrite=overwrite, output_verify=output_verify, checksum=checksum
)
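# Illustrative usage sketch (the output name is hypothetical):
#
#     >>> data = np.zeros((10, 10), dtype=np.float32)
#     >>> writeto("new_image.fits", data, overwrite=True, checksum=True)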
def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .column import python_to_tdisp
from .connect import REMOVE_KEYWORDS, is_column_keyword
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.units import Quantity
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names}"
)
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
tarray = table.as_array()
if isinstance(tarray, np.ma.MaskedArray):
# Fill masked values carefully:
# float column's default mask value needs to be Nan and
# string column's default mask should be an empty string.
# Note: getting the fill value for the structured array is
# more reliable than for individual columns for string entries.
# (no 'N/A' for a single-element string, where it should be 'N').
default_fill_value = np.ma.default_fill_value(tarray.dtype)
for colname, (coldtype, _) in tarray.dtype.fields.items():
if np.all(tarray.fill_value[colname] == default_fill_value[colname]):
# Since multi-element columns with dtypes such as '2f8' have
# a subdtype, we should look up the type of column on that.
coltype = (
coldtype.subdtype[0].type if coldtype.subdtype else coldtype.type
)
if issubclass(coltype, np.complexfloating):
tarray.fill_value[colname] = complex(np.nan, np.nan)
elif issubclass(coltype, np.inexact):
tarray.fill_value[colname] = np.nan
elif issubclass(coltype, np.character):
tarray.fill_value[colname] = ""
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(
tarray.filled(), header=hdr, character_as_bytes=character_as_bytes
)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# with non-default fill values in FITS (if at all possible).
int_formats = ("B", "I", "J", "K")
if not (col.format in int_formats or col.format.p_format in int_formats):
continue
fill_value = tarray[col.name].fill_value
col.null = fill_value.astype(int)
else:
table_hdu = BinTableHDU.from_columns(
tarray, header=hdr, character_as_bytes=character_as_bytes
)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(
table[col.name].info.format, logical_dtype=logical
)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
# Local imports to avoid importing units when it is not required,
# e.g. for command-line scripts
from astropy.units import Unit
from astropy.units.format.fits import UnitScaleError
try:
col.unit = unit.to_string(format="fits")
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
f"The column '{col.name}' could not be stored in FITS "
f"format because it has a scale '({str(scale)})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units."
)
except ValueError:
# Warn that the unit is lost, but let the details depend on
# whether the column was serialized (because it was a
# quantity), since then the unit can be recovered by astropy.
warning = (
f"The unit '{unit.to_string()}' could not be saved in "
"native FITS format "
)
if any(
"SerializedColumn" in item and "name: " + col.name in item
for item in table.meta.get("comments", [])
):
warning += (
"and hence will be lost to non-astropy fits readers. "
"Within astropy, the unit can roundtrip using QTable, "
"though one has to enable the unit before reading."
)
else:
warning += (
"and cannot be recovered in reading. It can roundtrip "
"within astropy by using QTable both to write and read "
"back, though one has to enable the unit before reading."
)
warnings.warn(warning, AstropyUserWarning)
else:
# Try creating a Unit to issue a warning if the unit is not
# FITS compliant
Unit(col.unit, format="fits", parse_strict="warn")
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop("__coordinate_columns__", {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set these, even if we have no data.
for attr in "coord_type", "coord_unit":
setattr(col, attr, col_info.get(attr, None))
trpos = col_info.get("time_ref_pos", None)
if trpos is not None:
col.time_ref_pos = trpos
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
f"Meta-data keyword {key} will be ignored since it conflicts "
"with a FITS reserved keyword",
AstropyUserWarning,
)
continue
# Convert to FITS format
if key == "comments":
key = "comment"
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
return table_hdu
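# Illustrative usage sketch (a minimal, hypothetical table with no units,
# masks, or mixin columns):
#
#     >>> from astropy.table import Table
#     >>> t = Table({"flux": [1.0, 2.0], "name": ["a", "b"]})
#     >>> hdu = table_to_hdu(t)
#     >>> hdu.columns.names
#     ['flux', 'name']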
def append(filename, data, header=None, checksum=False, verify=True, **kwargs):
"""
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, :class:`~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for appending.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
**kwargs
Additional arguments are passed to:
- `~astropy.io.fits.writeto` if the file does not exist or is empty.
In this case ``output_verify`` is the only possible argument.
- `~astropy.io.fits.open` if ``verify`` is True or if ``filename``
is a file object.
- Otherwise no additional arguments can be used.
"""
if isinstance(filename, path_like):
filename = os.path.expanduser(filename)
name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)
if noexist_or_empty:
#
        # The input file or file-like object either doesn't exist or is
        # empty. Use the writeto convenience function to write the
# output to the empty object.
#
writeto(filename, data, header, checksum=checksum, **kwargs)
else:
hdu = _makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
if verify or not closed:
f = fitsopen(filename, mode="append", **kwargs)
try:
f.append(hdu)
# Set a flag in the HDU so that only this HDU gets a checksum
# when writing the file.
hdu._output_checksum = checksum
finally:
f.close(closed=closed)
else:
f = _File(filename, mode="append")
try:
hdu._output_checksum = checksum
hdu._writeto(f)
finally:
f.close()
def update(filename, data, *args, **kwargs):
"""
Update the specified HDU with the input data/header.
Parameters
----------
filename : path-like or file-like
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, `~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for updating.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
HDU specification(s). Header and HDU specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension HDU
update(file, dat, hdr, 3) # update the 3rd extension HDU
update(file, dat, 'sci', 2) # update the 2nd extension HDU named 'sci'
update(file, dat, 3, header=hdr) # update the 3rd extension HDU
update(file, dat, header=hdr, ext=5) # update the 5th extension HDU
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
# The arguments to this function are a bit trickier to deal with than others
# in this module, since the documentation has promised that the header
# argument can be an optional positional argument.
if args and isinstance(args[0], Header):
header = args[0]
args = args[1:]
else:
header = None
# The header can also be a keyword argument--if both are provided the
# keyword takes precedence
header = kwargs.pop("header", header)
new_hdu = _makehdu(data, header)
closed = fileobj_closed(filename)
hdulist, _ext = _getext(filename, "update", *args, **kwargs)
try:
hdulist[_ext] = new_hdu
finally:
hdulist.close(closed=closed)
def info(filename, output=None, **kwargs):
"""
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each HDU.
Parameters
----------
filename : path-like or file-like
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default.
"""
mode, closed = _get_file_mode(filename, default="readonly")
# Set the default value for the ignore_missing_end parameter
if "ignore_missing_end" not in kwargs:
kwargs["ignore_missing_end"] = True
f = fitsopen(filename, mode=mode, **kwargs)
try:
ret = f.info(output=output)
finally:
if closed:
f.close()
return ret
def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
        not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow quick print out
of a FITS difference report and will write to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {
key: kwargs.pop(key) for key in ["ext", "extname", "extver"] if key in kwargs
}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
        # Use the handy _getext to interpret any ext keywords, but
        # we will need to close hdulista if opening inputb fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close a if b doesn't make it
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError(
"Extension specification with HDUList objects not implemented."
)
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
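# Illustrative usage sketch (the filenames are hypothetical):
#
#     >>> printdiff("obs_a.fits", "obs_b.fits")          # whole-file report
#     >>> printdiff("obs_a.fits", "obs_b.fits", ext=2)   # single-HDU report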
def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1, overwrite=False):
"""
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : path-like or file-like
Input fits file.
datafile : path-like or file-like, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`,
no column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `tabledump` function is to allow editing in a
standard text editor of the table data and parameters. The
`tableload` function can be used to reassemble the table from the
three ASCII files.
"""
# allow file object to already be opened in any of the valid modes
# and leave the file in the same state (opened or closed) as when
# the function was called
mode, closed = _get_file_mode(filename, default="readonly")
f = fitsopen(filename, mode=mode)
# Create the default data file name if one was not provided
try:
if not datafile:
root, tail = os.path.splitext(f._file.name)
datafile = root + "_" + repr(ext) + ".txt"
# Dump the data from the HDU to the files
f[ext].dump(datafile, cdfile, hfile, overwrite)
finally:
if closed:
f.close()
if isinstance(tabledump.__doc__, str):
tabledump.__doc__ += BinTableHDU._tdump_file_format.replace("\n", "\n ")
def tableload(datafile, cdfile, hfile=None):
"""
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
The primary use for the `tableload` function is to allow the input of
ASCII data that was edited in a standard text editor of the table
data and parameters. The tabledump function can be used to create the
initial ASCII files.
"""
return BinTableHDU.load(datafile, cdfile, hfile, replace=True)
if isinstance(tableload.__doc__, str):
tableload.__doc__ += BinTableHDU._tdump_file_format.replace("\n", "\n ")
def _getext(filename, mode, *args, ext=None, extname=None, extver=None, **kwargs):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
"""
    err_msg = "Redundant/conflicting extension argument(s): {}".format(
{"args": args, "ext": ext, "extname": extname, "extver": extver}
)
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
if _is_int(args[0]) or (isinstance(ext, tuple) and len(ext) == 2):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], str):
# The first arg is an extension name; it could still be valid
# to provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it
# below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError("Too many positional arguments.")
if ext is not None and not (
_is_int(ext)
or (
isinstance(ext, tuple)
and len(ext) == 2
and isinstance(ext[0], str)
and _is_int(ext[1])
)
):
raise ValueError(
"The ext keyword must be either an extension number "
"(zero-indexed) or a (extname, extver) tuple."
)
if extname is not None and not isinstance(extname, str):
raise ValueError("The extname argument must be a string.")
if extver is not None and not _is_int(extver):
raise ValueError("The extver argument must be an integer.")
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError("extver alone cannot specify an extension.")
hdulist = fitsopen(filename, mode=mode, **kwargs)
return hdulist, ext
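# Illustrative sketch ('image.fits' is hypothetical): the various calling
# conventions all normalise to a single ``ext`` value, e.g.
#
#     >>> hdul, ext = _getext("image.fits", "readonly", "SCI", extver=2)
#     >>> ext
#     ('SCI', 2)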
def _makehdu(data, header):
if header is None:
header = Header()
hdu = _BaseHDU._from_data(data, header)
if hdu.__class__ in (_BaseHDU, _ValidHDU):
# The HDU type was unrecognized, possibly due to a
# nonexistent/incomplete header
if (
isinstance(data, np.ndarray) and data.dtype.fields is not None
) or isinstance(data, np.recarray):
hdu = BinTableHDU(data, header=header)
elif isinstance(data, np.ndarray) or _is_dask_array(data):
hdu = ImageHDU(data, header=header)
else:
raise KeyError("Data must be a numpy array.")
return hdu
def _stat_filename_or_fileobj(filename):
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
closed = fileobj_closed(filename)
name = fileobj_name(filename) or ""
try:
loc = filename.tell()
except AttributeError:
loc = 0
noexist_or_empty = (
name and (not os.path.exists(name) or (os.path.getsize(name) == 0))
) or (not name and loc == 0)
return name, closed, noexist_or_empty
def _get_file_mode(filename, default="readonly"):
"""
    Allow a file object to already be opened in any of the valid modes
    and leave the file in the same state (opened or closed) as when
the function was called.
"""
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if fmode is not None:
mode = FILE_MODES.get(fmode)
if mode is None:
raise OSError(
"File mode of the input file object ({!r}) cannot be used to "
"read/write FITS files.".format(fmode)
)
return mode, closed
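# Illustrative sketch ('image.fits' is a hypothetical existing file, and the
# usual 'rb' -> 'readonly' entry in FILE_MODES is assumed): for an already
# open binary file object the FITS mode follows the file mode, e.g.
#
#     >>> f = open("image.fits", "rb")
#     >>> _get_file_mode(f)
#     ('readonly', False)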
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
import itertools
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
import numpy as np
from packaging.version import Version
from astropy.utils import data
from astropy.utils.exceptions import AstropyUserWarning
path_like = (str, bytes, os.PathLike)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = f"_update_{notification}"
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
        Exclude listeners when pickling this object's state, since they may be
        ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state["_listeners"] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter("__name__")):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.current_thread()
single_thread = (
threading.active_count() == 1 and curr_thread.name == "MainThread"
)
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn(
f"KeyboardInterrupt ignored until {func.__name__} is complete!",
AstropyUserWarning,
)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
        # Just a little trick to advance b without having to catch
        # StopIteration if b happens to be empty
break
return zip(a, b)
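# A minimal usage sketch of ``pairwise``; the helper name ``_example_pairwise``
# is hypothetical and the function is never called at import time.
def _example_pairwise():
    # Consecutive overlapping pairs; an empty or single-item iterable
    # produces no pairs because ``b`` is exhausted immediately.
    pairs = list(pairwise([1, 2, 3, 4]))
    assert pairs == [(1, 2), (2, 3), (3, 4)]
    return pairs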
def encode_ascii(s):
if isinstance(s, str):
return s.encode("ascii")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.str_):
ns = np.char.encode(s, "ascii").view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
            ns = ns.astype((np.bytes_, s.dtype.itemsize // 4))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.bytes_):
raise TypeError("string operation on non-string array")
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode("ascii")
except UnicodeDecodeError:
warnings.warn(
"non-ASCII characters are present in the FITS "
'file header and have been replaced by "?" '
"characters",
AstropyUserWarning,
)
s = s.decode("ascii", errors="replace")
return s.replace("\ufffd", "?")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.bytes_):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
# Numpy apparently also has a bug that if a string array is
# empty calling np.char.decode on it returns an empty float64
# array : https://github.com/numpy/numpy/issues/13156
dt = s.dtype.str.replace("S", "U")
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, "ascii").view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.str_):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError("string operation on non-string array")
return s
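# A minimal sketch of how ``encode_ascii``/``decode_ascii`` round-trip numpy
# string arrays while preserving the per-element width; the helper name
# ``_example_ascii_roundtrip`` is hypothetical and is never called at import time.
def _example_ascii_roundtrip():
    u = np.array(["abc", "de"], dtype="U3")
    b = encode_ascii(u)  # unicode (U3) -> bytes (S3)
    assert b.dtype == np.dtype("S3")
    roundtrip = decode_ascii(b)  # bytes (S3) -> unicode (U3)
    assert roundtrip.dtype == np.dtype("U3")
    return roundtrip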
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, "readable"):
return f.readable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "read"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "r+"):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, "writable"):
return f.writable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "write"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "wa+"):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, "buffer"):
return isfile(f.buffer)
elif hasattr(f, "raw"):
return isfile(f.raw)
return False
def fileobj_name(f):
"""
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, (str, bytes)):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, "name"):
return f.name
elif hasattr(f, "filename"):
return f.filename
elif hasattr(f, "__class__"):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, path_like):
return True
if hasattr(f, "closed"):
return f.closed
elif hasattr(f, "fileobj") and hasattr(f.fileobj, "closed"):
return f.fileobj.closed
elif hasattr(f, "fp") and hasattr(f.fp, "closed"):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, "fileobj") and hasattr(f.fileobj, "mode"):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, "fileobj_mode"):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, "fp") and hasattr(f.fp, "mode"):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, "mode"):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return "rb"
elif mode == gzip.WRITE:
return "wb"
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if "+" in mode:
mode = mode.replace("+", "")
mode += "+"
return mode
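# A minimal sketch of what ``fileobj_mode`` reports for a plain file object and
# for a non-file input; the helper name ``_example_fileobj_mode`` is
# hypothetical and is never called at import time.
def _example_fileobj_mode():
    assert fileobj_mode("just/a/path.fits") is None  # not a file-like object
    with open(__file__, "rb") as f:
        mode = fileobj_mode(f)  # normalized mode string, here 'rb'
    return mode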
def fileobj_is_binary(f):
"""
    Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, "binary"):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return "b" in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
    Like :func:`textwrap.fill` but preserves existing paragraphs, which
    :func:`textwrap.fill` does not otherwise handle well. Also handles section
    headers.
"""
paragraphs = text.split("\n\n")
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return "\n\n".join(maybe_fill(p) for p in paragraphs)
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if sys.platform == "darwin" and Version(platform.mac_ver()[0]) < Version(
"10.9"
):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024**3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
        # copy is needed because an array backed by the immutable bytes object
        # returned by read() is read-only
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2**32) - 1
_WIN_WRITE_LIMIT = (2**31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
try:
seekable = outfile.seekable()
except AttributeError:
seekable = False
if isfile(outfile) and seekable:
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (
sys.platform == "darwin"
and arr.nbytes >= _OSX_WRITE_LIMIT + 1
and arr.nbytes % 4096 == 0
):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith("win"):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx : idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
        # it does not have the buffer interface, a TypeError should be raised
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, "nditer"):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order="C"):
fileobj.write(item.tobytes())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if (sys.byteorder == "little" and byteorder == ">") or (
sys.byteorder == "big" and byteorder == "<"
):
for item in arr.flat:
fileobj.write(item.byteswap().tobytes())
else:
for item in arr.flat:
fileobj.write(item.tobytes())
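# A minimal sketch: writing an array to an in-memory buffer goes through the
# file-like code path above and produces the raw bytes of the array; the helper
# name ``_example_array_to_file`` is hypothetical and is never called at import time.
def _example_array_to_file():
    buf = io.BytesIO()
    _array_to_file(np.arange(4, dtype="<i4"), buf)
    data = buf.getvalue()
    assert len(data) == 16  # 4 elements x 4 bytes each
    return data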
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
elif not binmode and not isinstance(f, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
    Converts an array to a new dtype--if the itemsize of the new dtype is
    the same as the old dtype's and the two dtypes are not both numeric,
    a view is returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif array.dtype.itemsize == dtype.itemsize and not (
np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number)
):
# Includes a special case when both dtypes are at least numeric to
# account for old Trac ticket 218 (now inaccessible).
return array.view(dtype)
else:
return array.astype(dtype)
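# A minimal sketch: int32 and float32 have equal itemsizes but are both
# numeric, so ``_convert_array`` performs a real conversion instead of a
# reinterpreting view; the helper name ``_example_convert_array`` is
# hypothetical and is never called at import time.
def _example_convert_array():
    a = np.arange(3, dtype=np.int32)
    f = _convert_array(a, np.dtype(np.float32))
    assert f.dtype == np.float32 and f[2] == 2.0
    return f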
def _pseudo_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
# special case for int8
if dtype.kind == "i" and dtype.itemsize == 1:
return -128
assert dtype.kind == "u"
return 1 << (dtype.itemsize * 8 - 1)
def _is_pseudo_integer(dtype):
return (dtype.kind == "u" and dtype.itemsize >= 2) or (
dtype.kind == "i" and dtype.itemsize == 1
)
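# A minimal sketch: unsigned 16-bit data is a "pseudo integer" type stored in
# FITS as signed 16-bit plus a BZERO offset equal to its zero point, 2**15;
# the helper name ``_example_pseudo_zero`` is hypothetical and is never called
# at import time.
def _example_pseudo_zero():
    dtype = np.dtype(np.uint16)
    assert _is_pseudo_integer(dtype)
    assert _pseudo_zero(dtype) == 32768
    return _pseudo_zero(dtype)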
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(s, width):
"""
    Split a long string into parts where each part is no longer than ``width``
    and no word is cut into two pieces. But if there are any single words
    which are longer than ``width``, then they will be split in the middle of
the word.
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode("utf8") + b" ", dtype="S1")
# locations of the blanks
blank_loc = np.nonzero(arr == b" ")[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
        # check for one word longer than width, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
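# A minimal sketch of how ``_words_group`` breaks a value string at blanks into
# chunks no longer than ``width`` (as used when building CONTINUE cards); the
# helper name ``_example_words_group`` is hypothetical and is never called at
# import time.
def _example_words_group():
    parts = _words_group("lorem ipsum dolor sit amet", 11)
    assert all(len(part) <= 11 for part in parts)
    assert "".join(parts) == "lorem ipsum dolor sit amet"
    return parts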
def _tmp_name(input):
"""
    Create a temporary file name which should not already exist. Use the
    directory of the input file as the directory for the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
    If the array has an mmap.mmap at the base of its base chain, return the
    mmap object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, "base") and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ""
if not isinstance(hdulist, list):
hdulist = [
hdulist,
]
if dirname is None:
dirname = os.path.dirname(hdulist._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = (
"Not enough space on disk: requested {}, available {}. ".format(
hdulist_size, free_space
)
)
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
        # _str_to_num converts the value to an int or a float, so we need to
        # perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(f"io/fits/tests/data/{filename}", "astropy")
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
    # The following implementation converts the strings to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in "SU":
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == "S" else 4
dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}"
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j : j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
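# A minimal sketch: trailing blanks are overwritten with nulls directly in the
# array's own buffer, so no stripped copy is allocated; the helper name
# ``_example_rstrip_inplace`` is hypothetical and is never called at import time.
def _example_rstrip_inplace():
    a = np.array([b"abc   ", b"de    "], dtype="S6")
    _rstrip_inplace(a)
    assert a[0] == b"abc" and a[1] == b"de"
    return a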
def _is_dask_array(data):
"""Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down.
"""
if not hasattr(data, "compute"):
return False
try:
from dask.array import Array
except ImportError:
# If we cannot import dask, surely this cannot be a
# dask array!
return False
else:
return isinstance(data, Array)
|
36e22d62f022e6f690f26f495d551b68459d0173b55bea3a9fa83947b288e31e | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import re
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from . import conf
from .util import _is_int, _str_to_num, _words_group, translate
from .verify import VerifyError, VerifyWarning, _ErrList, _Verify
__all__ = ["Card", "Undefined"]
FIX_FP_TABLE = str.maketrans("de", "DE")
FIX_FP_TABLE2 = str.maketrans("dD", "eE")
CARD_LENGTH = 80
BLANK_CARD = " " * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = "= " # The standard FITS value indicator
VALUE_INDICATOR_LEN = len(VALUE_INDICATOR)
HIERARCH_VALUE_INDICATOR = "=" # HIERARCH cards may use a shortened indicator
class Undefined:
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r"^[A-Z0-9_-]{0,%d}$" % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r"^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$", re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r"(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?"
_digits_NFSC = r"(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?"
_numr_FSC = r"[+-]?" + _digits_FSC
_numr_NFSC = r"[+-]? *" + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(rf"(?P<sign>[+-])?0*?(?P<digt>{_digits_FSC})")
_number_NFSC_RE = re.compile(rf"(?P<sign>[+-])? *0*?(?P<digt>{_digits_NFSC})")
# Used in cards using the CONTINUE convention which expect a string
# followed by an optional comment
_strg = r"\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )"
_comm_field = r"(?P<comm_field>(?P<sepr>/ *)(?P<comm>(.|\n)*))"
_strg_comment_RE = re.compile(f"({_strg})? *{_comm_field}?")
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r"[ -~]*\Z")
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
    # comment may be an empty string.
# fmt: off
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$'
)
# fmt: on
# fmt: off
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
fr')? *){_comm_field}?$'
)
# fmt: on
_rvkc_identifier = r"[a-zA-Z_]\w*"
_rvkc_field = _rvkc_identifier + r"(\.\d+)?"
_rvkc_field_specifier_s = rf"{_rvkc_field}(\.{_rvkc_field})*"
_rvkc_field_specifier_val = r"(?P<keyword>{}): +(?P<val>{})".format(
_rvkc_field_specifier_s, _numr_FSC
)
_rvkc_keyword_val = rf"\'(?P<rawval>{_rvkc_field_specifier_val})\'"
_rvkc_keyword_val_comm = rf" *{_rvkc_keyword_val} *(/ *(?P<comm>[ -~]*))?$"
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + "$")
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = re.compile(
r"(?P<keyword>{})\.(?P<field_specifier>{})$".format(
_rvkc_identifier, _rvkc_field_specifier_s
)
)
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
_commentary_keywords = {"", "COMMENT", "HISTORY", "END"}
_special_keywords = _commentary_keywords.union(["CONTINUE"])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and "key" in kwargs:
keyword = kwargs["key"]
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
        # If the card could not be parsed according to the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (
keyword is not None
and value is not None
and self._check_if_rvkc(keyword, value)
):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ""
return ""
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError("Once set, the Card keyword may not be modified")
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(
keyword_upper
):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == "END":
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == "HIERARCH ":
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
"Keyword name {!r} is greater than 8 characters or "
"contains characters not allowed by the FITS "
"standard; a HIERARCH card will be created.".format(keyword),
VerifyWarning,
)
else:
raise ValueError(f"Illegal keyword name: {keyword!r}.")
self._keyword = keyword
self._modified = True
else:
raise ValueError(f"Keyword name {keyword!r} is not a string.")
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == "":
self._value = value = ""
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(
value,
(
str,
int,
float,
complex,
bool,
Undefined,
np.floating,
np.integer,
np.complexfloating,
np.bool_,
),
):
raise ValueError(f"Illegal value: {value!r}.")
if isinstance(value, (float, np.float32)) and (
np.isnan(value) or np.isinf(value)
):
# value is checked for both float and np.float32 instances
# since np.float32 is not considered a Python float.
raise ValueError(
f"Floating point {value!r} values are not allowed in FITS headers."
)
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
"FITS header values must contain standard printable ASCII "
"characters; {!r} contains characters not representable in "
"ASCII or non-printable characters.".format(value)
)
elif isinstance(value, np.bool_):
value = bool(value)
if conf.strip_header_whitespace and (
isinstance(oldvalue, str) and isinstance(value, str)
):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = oldvalue != value or not isinstance(value, type(oldvalue))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError(f"value {self._value} is not a float")
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
if not self.field_specifier:
self.value = ""
else:
raise AttributeError(
"Values cannot be deleted from record-valued keyword cards"
)
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split(".", 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = f"{self.field_specifier}: {self.value}"
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is not None:
return self._comment
elif self._image:
self._comment = self._parse_comment()
return self._comment
else:
self._comment = ""
return ""
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if comment is None:
comment = ""
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
"FITS header comments must contain standard printable "
"ASCII characters; {!r} contains characters not "
"representable in ASCII or non-printable characters.".format(
comment
)
)
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ""
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
self.comment = ""
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
        # Ensure that the keyword exists and has been parsed--this will set
        # the internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
@field_specifier.setter
def field_specifier(self, field_specifier):
if not field_specifier:
raise ValueError(
"The field-specifier may not be blank in record-valued keyword cards."
)
elif not self.field_specifier:
raise AttributeError(
"Cannot coerce cards to be record-valued "
"keyword cards by setting the "
"field_specifier attribute"
)
elif field_specifier != self.field_specifier:
self._field_specifier = field_specifier
            # The keyword needs to be updated as well
keyword = self._keyword.split(".", 1)[0]
self._keyword = ".".join([keyword, field_specifier])
self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError(
"The field_specifier attribute may not be "
"deleted from record-valued keyword cards."
)
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify("fix+warn")
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (
not self.keyword
and (isinstance(self.value, str) and not self.value)
and not self.comment
)
@classmethod
def fromstring(cls, image):
"""
Construct a `Card` object from a (raw) string. It will pad the string
if it is not the length of a card image (80 columns). If the card
image is longer than 80 columns, assume it contains ``CONTINUE``
card(s).
"""
card = cls()
if isinstance(image, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place
image = image.decode("latin1")
card._image = _pad(image)
card._verified = False
return card
@classmethod
def normalize_keyword(cls, keyword):
"""
`classmethod` to convert a keyword value that may contain a
field-specifier to uppercase. The effect is to raise the key to
uppercase and leave the field specifier in its original case.
Parameters
----------
        keyword : str
A keyword value or a ``keyword.field-specifier`` value
"""
# Test first for the most common case: a standard FITS keyword provided
# in standard all-caps
if len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword):
return keyword
# Test if this is a record-valued keyword
match = cls._rvkc_keyword_name_RE.match(keyword)
if match:
return ".".join(
(match.group("keyword").strip().upper(), match.group("field_specifier"))
)
elif len(keyword) > 9 and keyword[:9].upper() == "HIERARCH ":
# Remove 'HIERARCH' from HIERARCH keywords; this could lead to
# ambiguity if there is actually a keyword card containing
# "HIERARCH HIERARCH", but shame on you if you do that.
return keyword[9:].strip().upper()
else:
# A normal FITS keyword, but provided in non-standard case
return keyword.strip().upper()
def _check_if_rvkc(self, *args):
"""
Determine whether or not the card is a record-valued keyword card.
If one argument is given, that argument is treated as a full card image
and parsed as such. If two arguments are given, the first is treated
as the card keyword (including the field-specifier if the card is
intended as a RVKC), and the second as the card value OR the first value
can be the base keyword, and the second value the 'field-specifier:
value' string.
        If the check passes, the ._keyword, ._value, and .field_specifier
        attributes are set.
Examples
--------
::
self._check_if_rvkc('DP1', 'AXIS.1: 2')
self._check_if_rvkc('DP1.AXIS.1', 2)
self._check_if_rvkc('DP1 = AXIS.1: 2')
"""
if not conf.enable_record_valued_keyword_cards:
return False
if len(args) == 1:
return self._check_if_rvkc_image(*args)
elif len(args) == 2:
keyword, value = args
if not isinstance(keyword, str):
return False
if keyword in self._commentary_keywords:
return False
match = self._rvkc_keyword_name_RE.match(keyword)
if match and isinstance(value, (int, float)):
self._init_rvkc(
match.group("keyword"), match.group("field_specifier"), None, value
)
return True
# Testing for ': ' is a quick way to avoid running the full regular
# expression, speeding this up for the majority of cases
if isinstance(value, str) and value.find(": ") > 0:
match = self._rvkc_field_specifier_val_RE.match(value)
if match and self._keywd_FSC_RE.match(keyword):
self._init_rvkc(
keyword, match.group("keyword"), value, match.group("val")
)
return True
def _check_if_rvkc_image(self, *args):
"""
Implements `Card._check_if_rvkc` for the case of an unparsed card
image. If given one argument this is the full intact image. If given
two arguments the card has already been split between keyword and
value+comment at the standard value indicator '= '.
"""
if len(args) == 1:
image = args[0]
eq_idx = image.find(VALUE_INDICATOR)
if eq_idx < 0 or eq_idx > 9:
return False
keyword = image[:eq_idx]
rest = image[eq_idx + VALUE_INDICATOR_LEN :]
else:
keyword, rest = args
rest = rest.lstrip()
# This test allows us to skip running the full regular expression for
# the majority of cards that do not contain strings or that definitely
# do not contain RVKC field-specifiers; it's very much a
# micro-optimization but it does make a measurable difference
if not rest or rest[0] != "'" or rest.find(": ") < 2:
return False
match = self._rvkc_keyword_val_comm_RE.match(rest)
if match:
self._init_rvkc(
keyword,
match.group("keyword"),
match.group("rawval"),
match.group("val"),
)
return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = ".".join((keyword_upper, field_specifier))
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
def _parse_keyword(self):
keyword = self._image[:KEYWORD_LENGTH].strip()
keyword_upper = keyword.upper()
if keyword_upper in self._special_keywords:
return keyword_upper
elif (
keyword_upper == "HIERARCH"
and self._image[8] == " "
and HIERARCH_VALUE_INDICATOR in self._image
):
            # This is a valid HIERARCH card as described by the HIERARCH keyword
# convention:
# http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
return keyword.strip()
else:
val_ind_idx = self._image.find(VALUE_INDICATOR)
if 0 <= val_ind_idx <= KEYWORD_LENGTH:
# The value indicator should appear in byte 8, but we are
# flexible and allow this to be fixed
if val_ind_idx < KEYWORD_LENGTH:
keyword = keyword[:val_ind_idx]
keyword_upper = keyword_upper[:val_ind_idx]
rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN :]
# So far this looks like a standard FITS keyword; check whether
# the value represents a RVKC; if so then we pass things off to
# the RVKC parser
if self._check_if_rvkc_image(keyword, rest):
return self._keyword
return keyword_upper
else:
warnings.warn(
"The following header keyword is invalid or follows an "
"unrecognized non-standard convention:\n{}".format(self._image),
AstropyUserWarning,
)
self._invalid = True
return keyword
def _parse_value(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# Likewise for invalid cards
if self.keyword.upper() in self._commentary_keywords or self._invalid:
return self._image[KEYWORD_LENGTH:].rstrip()
if self._check_if_rvkc(self._image):
return self._value
m = self._value_NFSC_RE.match(self._split()[1])
if m is None:
raise VerifyError(
f"Unparsable card ({self.keyword}), fix it first with .verify('fix')."
)
if m.group("bool") is not None:
value = m.group("bool") == "T"
elif m.group("strg") is not None:
value = re.sub("''", "'", m.group("strg"))
elif m.group("numr") is not None:
# Check for numbers with leading 0s.
numr = self._number_NFSC_RE.match(m.group("numr"))
digt = translate(numr.group("digt"), FIX_FP_TABLE2, " ")
if numr.group("sign") is None:
sign = ""
else:
sign = numr.group("sign")
value = _str_to_num(sign + digt)
elif m.group("cplx") is not None:
# Check for numbers with leading 0s.
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE2, " ")
if real.group("sign") is None:
rsign = ""
else:
rsign = real.group("sign")
value = _str_to_num(rsign + rdigt)
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE2, " ")
if imag.group("sign") is None:
isign = ""
else:
isign = imag.group("sign")
value += _str_to_num(isign + idigt) * 1j
else:
value = UNDEFINED
if not self._valuestring:
self._valuestring = m.group("valu")
return value
def _parse_comment(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# likewise for invalid/unparsable cards
if self.keyword in Card._commentary_keywords or self._invalid:
return ""
valuecomment = self._split()[1]
m = self._value_NFSC_RE.match(valuecomment)
comment = ""
if m is not None:
# Don't combine this if statement with the one above, because
# we only want the elif case to run if this was not a valid
# card at all
if m.group("comm"):
comment = m.group("comm").rstrip()
elif "/" in valuecomment:
# The value in this FITS file was not in a valid/known format. In
# this case the best we can do is guess that everything after the
# first / was meant to be the comment
comment = valuecomment.split("/", 1)[1].strip()
return comment
def _split(self):
"""
Split the card image between the keyword and the rest of the card.
"""
if self._image is not None:
# If we already have a card image, don't try to rebuild a new card
# image, which self.image would do
image = self._image
else:
image = self.image
# Split cards with CONTINUE cards or commentary keywords with long
# values
if len(self._image) > self.length:
values = []
comments = []
keyword = None
for card in self._itersubcards():
kw, vc = card._split()
if keyword is None:
keyword = kw
if keyword in self._commentary_keywords:
values.append(vc)
continue
# Should match a string followed by a comment; if not it
# might be an invalid Card, so we just take it verbatim
m = self._strg_comment_RE.match(vc)
if not m:
return kw, vc
value = m.group("strg") or ""
value = value.rstrip().replace("''", "'")
if value and value[-1] == "&":
value = value[:-1]
values.append(value)
comment = m.group("comm")
if comment:
comments.append(comment.rstrip())
if keyword in self._commentary_keywords:
valuecomment = "".join(values)
else:
# CONTINUE card
valuecomment = f"'{''.join(values)}' / {' '.join(comments)}"
return keyword, valuecomment
if self.keyword in self._special_keywords:
keyword, valuecomment = image.split(" ", 1)
else:
try:
delim_index = image.index(self._value_indicator)
except ValueError:
delim_index = None
# The equal sign may not be any higher than column 10; anything
# past that must be considered part of the card value
if delim_index is None:
keyword = image[:KEYWORD_LENGTH]
valuecomment = image[KEYWORD_LENGTH:]
elif delim_index > 10 and image[:9] != "HIERARCH ":
keyword = image[:8]
valuecomment = image[8:]
else:
keyword, valuecomment = image.split(self._value_indicator, 1)
return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split(".", 1)
self._keyword = ".".join([keyword.upper(), field_specifier])
else:
self._keyword = self._keyword.upper()
self._modified = True
def _fix_value(self):
"""Fix the card image for fixable non-standard compliance."""
value = None
keyword, valuecomment = self._split()
m = self._value_NFSC_RE.match(valuecomment)
# for the unparsable case
if m is None:
try:
value, comment = valuecomment.split("/", 1)
self.value = value.strip()
self.comment = comment.strip()
except (ValueError, IndexError):
self.value = valuecomment
self._valuestring = self._value
return
elif m.group("numr") is not None:
numr = self._number_NFSC_RE.match(m.group("numr"))
value = translate(numr.group("digt"), FIX_FP_TABLE, " ")
if numr.group("sign") is not None:
value = numr.group("sign") + value
elif m.group("cplx") is not None:
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE, " ")
if real.group("sign") is not None:
rdigt = real.group("sign") + rdigt
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE, " ")
if imag.group("sign") is not None:
idigt = imag.group("sign") + idigt
value = f"({rdigt}, {idigt})"
self._valuestring = value
# The value itself has not been modified, but its serialized
# representation (as stored in self._valuestring) has been changed, so
# still set this card as having been modified (see ticket #137)
self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
return "{:{len}}".format(
self.keyword.split(".", 1)[0], len=KEYWORD_LENGTH
)
elif self._hierarch:
return f"HIERARCH {self.keyword} "
else:
return "{:{len}}".format(self.keyword, len=KEYWORD_LENGTH)
else:
return " " * KEYWORD_LENGTH
def _format_value(self):
# value string
float_types = (float, np.floating, complex, np.complexfloating)
# Force the value to be parsed out first
value = self.value
# But work with the underlying raw value instead (to preserve
# whitespace, for now...)
value = self._value
if self.keyword in self._commentary_keywords:
# The value of a commentary card must be just a raw unprocessed
# string
value = str(value)
elif (
self._valuestring
and not self._valuemodified
and isinstance(self.value, float_types)
):
# Keep the existing formatting for float/complex numbers
value = f"{self._valuestring:>20}"
elif self.field_specifier:
value = _format_value(self._value).strip()
value = f"'{self.field_specifier}: {value}'"
else:
value = _format_value(value)
# For HIERARCH cards the value should be shortened to conserve space
if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
value = value.strip()
return value
def _format_comment(self):
if not self.comment:
return ""
else:
return f" / {self._comment}"
def _format_image(self):
keyword = self._format_keyword()
value = self._format_value()
is_commentary = keyword.strip() in self._commentary_keywords
if is_commentary:
comment = ""
else:
comment = self._format_comment()
# equal sign string
# by default use the standard value indicator even for HIERARCH cards;
# later we may abbreviate it if necessary
delimiter = VALUE_INDICATOR
if is_commentary:
delimiter = ""
# put all parts together
output = "".join([keyword, delimiter, value, comment])
# For HIERARCH cards we can save a bit of space if necessary by
# removing the space between the keyword and the equals sign; I'm
        # guessing this is part of the HIERARCH card specification
keywordvalue_length = len(keyword) + len(delimiter) + len(value)
if keywordvalue_length > self.length and keyword.startswith("HIERARCH"):
if keywordvalue_length == self.length + 1 and keyword[-1] == " ":
output = "".join([keyword[:-1], delimiter, value, comment])
else:
# I guess the HIERARCH card spec is incompatible with CONTINUE
# cards
raise ValueError(
f"The header keyword {self.keyword!r} with its value is too long"
)
if len(output) <= self.length:
output = f"{output:80}"
else:
# longstring case (CONTINUE card)
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if isinstance(self.value, str) and len(value) > (self.length - 10):
output = self._format_long_image()
else:
warnings.warn(
"Card is too long, comment will be truncated.", VerifyWarning
)
output = output[: Card.length]
return output
def _format_long_image(self):
"""
Break up long string value/comment into ``CONTINUE`` cards.
This is a primitive implementation: it will put the value
string in one block and the comment string in another. Also,
it does not break at the blank space between words. So it may
not look pretty.
"""
if self.keyword in Card._commentary_keywords:
return self._format_long_commentary_image()
value_length = 67
comment_length = 64
output = []
# do the value string
value = self._value.replace("'", "''")
words = _words_group(value, value_length)
for idx, word in enumerate(words):
if idx == 0:
headstr = "{:{len}}= ".format(self.keyword, len=KEYWORD_LENGTH)
else:
headstr = "CONTINUE "
# If this is the final CONTINUE remove the '&'
if not self.comment and idx == len(words) - 1:
value_format = "'{}'"
else:
value_format = "'{}&'"
value = value_format.format(word)
output.append(f"{headstr + value:80}")
# do the comment string
comment_format = "{}"
if self.comment:
words = _words_group(self.comment, comment_length)
for idx, word in enumerate(words):
# If this is the final CONTINUE remove the '&'
if idx == len(words) - 1:
headstr = "CONTINUE '' / "
else:
headstr = "CONTINUE '&' / "
comment = headstr + comment_format.format(word)
output.append(f"{comment:80}")
return "".join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
        will render the card as multiple consecutive commentary cards of the
same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx : idx + maxlen])))
idx += maxlen
return "".join(output)
def _verify(self, option="warn"):
errs = []
fix_text = f"Fixed {self.keyword!r} card to meet the FITS standard."
# Don't try to verify cards that already don't meet any recognizable
# standard
if self._invalid:
return _ErrList(errs)
# verify the equal sign position
if self.keyword not in self._commentary_keywords and (
self._image
and self._image[:9].upper() != "HIERARCH "
and self._image.find("=") != 8
):
errs.append(
dict(
err_text=(
"Card {!r} is not FITS standard (equal sign not "
"at column 8).".format(self.keyword)
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the key, it is never fixable
# always fix silently the case where "=" is before column 9,
# since there is no way to communicate back to the _keys.
if (self._image and self._image[:8].upper() == "HIERARCH") or self._hierarch:
pass
else:
if self._image:
# PyFITS will auto-uppercase any standard keyword, so lowercase
# keywords can only occur if they came from the wild
keyword = self._split()[0]
if keyword != keyword.upper():
# Keyword should be uppercase unless it's a HIERARCH card
errs.append(
dict(
err_text=f"Card keyword {keyword!r} is not upper case.",
fix_text=fix_text,
fix=self._fix_keyword,
)
)
keyword = self.keyword
if self.field_specifier:
keyword = keyword.split(".", 1)[0]
if not self._keywd_FSC_RE.match(keyword):
errs.append(
dict(err_text=f"Illegal keyword name {keyword!r}", fixable=False)
)
# verify the value, it may be fixable
keyword, valuecomment = self._split()
if self.keyword in self._commentary_keywords:
# For commentary keywords all that needs to be ensured is that it
# contains only printable ASCII characters
if not self._ascii_text_re.match(valuecomment):
errs.append(
dict(
err_text=(
"Unprintable string {!r}; commentary cards may "
"only contain printable ASCII characters".format(
valuecomment
)
),
fixable=False,
)
)
else:
if not self._valuemodified:
m = self._value_FSC_RE.match(valuecomment)
# If the value of a card was replaced before the card was ever
# even verified, the new value can be considered valid, so we
# don't bother verifying the old value. See
# https://github.com/astropy/astropy/issues/5408
if m is None:
errs.append(
dict(
err_text=(
f"Card {self.keyword!r} is not FITS standard "
f"(invalid value string: {valuecomment!r})."
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the comment (string), it is never fixable
m = self._value_NFSC_RE.match(valuecomment)
if m is not None:
comment = m.group("comm")
if comment is not None:
if not self._ascii_text_re.match(comment):
errs.append(
dict(
err_text=(
f"Unprintable string {comment!r}; header "
"comments may only contain printable "
"ASCII characters"
),
fixable=False,
)
)
errs = _ErrList([self.run_option(option, **err) for err in errs])
self._verified = True
return errs
def _itersubcards(self):
"""
If the card image is greater than 80 characters, it should consist of a
        normal card followed by one or more CONTINUE cards. This method returns
the subcards that make up this logical card.
This can also support the case where a HISTORY or COMMENT card has a
long value that is stored internally as multiple concatenated card
images.
"""
ncards = len(self._image) // Card.length
for idx in range(0, Card.length * ncards, Card.length):
card = Card.fromstring(self._image[idx : idx + Card.length])
if idx > 0 and card.keyword.upper() not in self._special_keywords:
raise VerifyError(
"Long card images must have CONTINUE cards after "
"the first card or have commentary keywords like "
"HISTORY or COMMENT."
)
if not isinstance(card.value, str):
raise VerifyError("CONTINUE cards must have string values.")
yield card
def _int_or_float(s):
"""
    Converts a string to an int if possible, or to a float.
    If the string cannot be converted to an int or a float, a ValueError is raised.
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
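# Illustrative examples (not from the original source) of the helper above; the
# exact error message text may vary between Python versions.
#
#     >>> _int_or_float("42")
#     42
#     >>> _int_or_float("4.2e1")
#     42.0
#     >>> _int_or_float("abc")
#     Traceback (most recent call last):
#         ...
#     ValueError: could not convert string to float: 'abc'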
def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
    # string value should occupy at least 8 columns, unless it is
# a null string
if isinstance(value, str):
if value == "":
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = f"'{exp_val_str:8}'"
return f"{val_str:20}"
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return f"{repr(value)[0]:>20}" # T or F
elif _is_int(value):
return f"{value:>20d}"
elif isinstance(value, (float, np.floating)):
return f"{_format_float(value):>20}"
elif isinstance(value, (complex, np.complexfloating)):
val_str = f"({_format_float(value.real)}, {_format_float(value.imag)})"
return f"{val_str:>20}"
elif isinstance(value, Undefined):
return ""
else:
return ""
def _format_float(value):
"""Format a floating number to make sure it gets the decimal point."""
value_str = f"{value:.16G}"
if "." not in value_str and "E" not in value_str:
value_str += ".0"
elif "E" in value_str:
# On some Windows builds of Python (and possibly other platforms?) the
# exponent is zero-padded out to, it seems, three digits. Normalize
# the format to pad only to two digits.
significand, exponent = value_str.split("E")
if exponent[0] in ("+", "-"):
sign = exponent[0]
exponent = exponent[1:]
else:
sign = ""
value_str = f"{significand}E{sign}{int(exponent):02d}"
# Limit the value string to at most 20 characters.
str_len = len(value_str)
if str_len > 20:
idx = value_str.find("E")
if idx < 0:
value_str = value_str[:20]
else:
value_str = value_str[: 20 - (str_len - idx)] + value_str[idx:]
return value_str
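# Illustrative examples (not from the original source) of _format_float:
#
#     >>> _format_float(3.0)      # a decimal point is always present
#     '3.0'
#     >>> _format_float(1e20)     # exponent normalized to two digits
#     '1E+20'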
def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + " " * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + " " * (Card.length - strlen)
|
ffae35f8f558c66de3af4929ad80fe0813cc9427a7e2e41b70fde17fd30db614 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import operator
import warnings
from astropy.utils import indent
from astropy.utils.exceptions import AstropyUserWarning
class VerifyError(Exception):
"""
Verify exception class.
"""
class VerifyWarning(AstropyUserWarning):
"""
Verify warning class.
"""
VERIFY_OPTIONS = [
"ignore",
"warn",
"exception",
"fix",
"silentfix",
"fix+ignore",
"fix+warn",
"fix+exception",
"silentfix+ignore",
"silentfix+warn",
"silentfix+exception",
]
class _Verify:
"""
Shared methods for verification.
"""
def run_option(
self, option="warn", err_text="", fix_text="Fixed.", fix=None, fixable=True
):
"""
Execute the verification with selected option.
"""
text = err_text
if option in ["warn", "exception"]:
fixable = False
# fix the value
elif not fixable:
text = f"Unfixable error: {text}"
else:
if fix:
fix()
text += " " + fix_text
return (fixable, text)
def verify(self, option="warn"):
"""
Verify all values in the instance.
Parameters
----------
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
"""
opt = option.lower()
if opt not in VERIFY_OPTIONS:
raise ValueError(f"Option {option!r} not recognized.")
if opt == "ignore":
return
errs = self._verify(opt)
# Break the verify option into separate options related to reporting of
# errors, and fixing of fixable errors
if "+" in opt:
fix_opt, report_opt = opt.split("+")
elif opt in ["fix", "silentfix"]:
# The original default behavior for 'fix' and 'silentfix' was to
# raise an exception for unfixable errors
fix_opt, report_opt = opt, "exception"
else:
fix_opt, report_opt = None, opt
if fix_opt == "silentfix" and report_opt == "ignore":
# Fixable errors were fixed, but don't report anything
return
if fix_opt == "silentfix":
# Don't print out fixable issues; the first element of each verify
# item is a boolean indicating whether or not the issue was fixable
line_filter = lambda x: not x[0]
elif fix_opt == "fix" and report_opt == "ignore":
# Don't print *unfixable* issues, but do print fixed issues; this
# is probably not very useful but the option exists for
# completeness
line_filter = operator.itemgetter(0)
else:
line_filter = None
unfixable = False
messages = []
for fixable, message in errs.iter_lines(filter=line_filter):
if fixable is not None:
unfixable = not fixable
messages.append(message)
if messages:
messages.insert(0, "Verification reported errors:")
messages.append("Note: astropy.io.fits uses zero-based indexing.\n")
if fix_opt == "silentfix" and not unfixable:
return
elif report_opt == "warn" or (fix_opt == "fix" and not unfixable):
for line in messages:
warnings.warn(line, VerifyWarning)
else:
raise VerifyError("\n" + "\n".join(messages))
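# Hedged usage sketch (not part of the original source): classes that mix in
# _Verify (for example Card, HDU and HDUList objects in astropy.io.fits) expose
# this ``verify`` method directly. "example.fits" below is a placeholder filename.
#
#     >>> from astropy.io import fits
#     >>> hdul = fits.open("example.fits")
#     >>> hdul.verify("fix+warn")   # fix what is fixable, warn about the rest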
class _ErrList(list):
"""
Verification errors list class. It has a nested list structure
constructed by error messages generated by verifications at
different class levels.
"""
def __init__(self, val=(), unit="Element"):
super().__init__(val)
self.unit = unit
def __str__(self):
return "\n".join(item[1] for item in self.iter_lines())
def iter_lines(self, filter=None, shift=0):
"""
Iterate the nested structure as a list of strings with appropriate
indentations for each level of structure.
"""
element = 0
# go through the list twice, first time print out all top level
# messages
for item in self:
if not isinstance(item, _ErrList):
if filter is None or filter(item):
yield item[0], indent(item[1], shift=shift)
        # second time go through the next-level items; each next-level item
        # must be present, even if it has nothing.
for item in self:
if isinstance(item, _ErrList):
next_lines = item.iter_lines(filter=filter, shift=shift + 1)
try:
first_line = next(next_lines)
except StopIteration:
first_line = None
if first_line is not None:
if self.unit:
# This line is sort of a header for the next level in
# the hierarchy
yield None, indent(f"{self.unit} {element}:", shift=shift)
yield first_line
yield from next_lines
element += 1
|
82382bc451297ab3252dc17967fe427faae0e6e4cd966a6430bcf0a2d6427d69 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
cds.py:
Classes to read CDS / Vizier table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import fnmatch
import itertools
import os
import re
from contextlib import suppress
from astropy.units import Unit
from . import core, fixedwidth
__doctest_skip__ = ["*"]
class CdsHeader(core.BaseHeader):
_subfmt = "CDS"
col_type_map = {
"e": core.FloatType,
"f": core.FloatType,
"i": core.IntType,
"a": core.StrType,
}
"The ReadMe file to construct header from."
readme = None
def get_type_map_key(self, col):
match = re.match(r"\d*(\S)", col.raw_type.lower())
if not match:
raise ValueError(
                f'Unrecognized {self._subfmt} format "{col.raw_type}" for column '
f'"{col.name}"'
)
return match.group(1)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a CDS/MRT
header.
Parameters
----------
lines : list
List of table lines
"""
# Read header block for the table ``self.data.table_name`` from the read
# me file ``self.readme``.
if self.readme and self.data.table_name:
in_header = False
readme_inputter = core.BaseInputter()
f = readme_inputter.get_lines(self.readme)
# Header info is not in data lines but in a separate file.
lines = []
comment_lines = 0
for line in f:
line = line.strip()
if in_header:
lines.append(line)
if line.startswith(("------", "=======")):
comment_lines += 1
if comment_lines == 3:
break
else:
match = re.match(
r"Byte-by-byte Description of file: (?P<name>.+)$",
line,
re.IGNORECASE,
)
if match:
                        # Split 'name' in case it contains multiple files
names = [s for s in re.split("[, ]+", match.group("name")) if s]
# Iterate on names to find if one matches the tablename
# including wildcards.
for pattern in names:
if fnmatch.fnmatch(self.data.table_name, pattern):
in_header = True
lines.append(line)
break
else:
raise core.InconsistentTableError(
f"Can't find table {self.data.table_name} in {self.readme}"
)
found_line = False
for i_col_def, line in enumerate(lines):
if re.match(r"Byte-by-byte Description", line, re.IGNORECASE):
found_line = True
elif found_line: # First line after list of file descriptions
i_col_def -= 1 # Set i_col_def to last description line
break
else:
raise ValueError('no line with "Byte-by-byte Description" found')
re_col_def = re.compile(
r"""\s*
(?P<start> \d+ \s* -)? \s*
(?P<end> \d+) \s+
(?P<format> [\w.]+) \s+
(?P<units> \S+) \s+
(?P<name> \S+)
(\s+ (?P<descr> \S.*))?""",
re.VERBOSE,
)
cols = []
for line in itertools.islice(lines, i_col_def + 4, None):
if line.startswith(("------", "=======")):
break
match = re_col_def.match(line)
if match:
col = core.Column(name=match.group("name"))
col.start = int(
re.sub(r'[-\s]', '', match.group('start') or match.group('end'))) - 1 # fmt: skip
col.end = int(match.group("end"))
unit = match.group("units")
if unit == "---":
col.unit = None # "---" is the marker for no unit in CDS/MRT table
else:
col.unit = Unit(unit, format="cds", parse_strict="warn")
col.description = (match.group("descr") or "").strip()
col.raw_type = match.group("format")
col.type = self.get_col_type(col)
match = re.match(
# Matches limits specifier (eg []) that may or may not be
# present
r"(?P<limits>[\[\]] \S* [\[\]])?"
# Matches '?' directly
r"\?"
# Matches to nullval if and only if '=' is present
r"((?P<equal>=)(?P<nullval> \S*))?"
# Matches to order specifier: ('+', '-', '+=', '-=')
r"(?P<order>[-+]?[=]?)"
                    # Matches description text even if no whitespace is
# present after '?'
r"(\s* (?P<descriptiontext> \S.*))?",
col.description,
re.VERBOSE,
)
if match:
col.description = (match.group("descriptiontext") or "").strip()
if issubclass(col.type, core.FloatType):
fillval = "nan"
else:
fillval = "0"
if match.group("nullval") == "-":
col.null = "---"
# CDS/MRT tables can use -, --, ---, or ---- to mark missing values
# see https://github.com/astropy/astropy/issues/1335
for i in [1, 2, 3, 4]:
self.data.fill_values.append(("-" * i, fillval, col.name))
else:
col.null = match.group("nullval")
if col.null is None:
col.null = ""
self.data.fill_values.append((col.null, fillval, col.name))
cols.append(col)
else: # could be a continuation of the previous col's description
if cols:
cols[-1].description += line.strip()
else:
raise ValueError(f'Line "{line}" not parsable as CDS header')
self.names = [x.name for x in cols]
self.cols = cols
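# Hedged illustration (not from the original source): a typical Byte-by-byte
# description line matched by ``re_col_def`` above, and the groups it captures.
# For the line
#
#       1-  3 I3    ---    Index   Running identification number
#
# the groups are roughly: start='1-' (or None for single-byte columns), end='3',
# format='I3', units='---', name='Index', descr='Running identification number'.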
class CdsData(core.BaseData):
"""CDS table data reader."""
_subfmt = "CDS"
splitter_class = fixedwidth.FixedWidthSplitter
def process_lines(self, lines):
"""Skip over CDS/MRT header by finding the last section delimiter."""
# If the header has a ReadMe and data has a filename
# then no need to skip, as the data lines do not have header
# info. The ``read`` method adds the table_name to the ``data``
# attribute.
if self.header.readme and self.table_name:
return lines
i_sections = [
i for i, x in enumerate(lines) if x.startswith(("------", "======="))
]
if not i_sections:
raise core.InconsistentTableError(
f"No {self._subfmt} section delimiter found"
)
return lines[i_sections[-1] + 1 :]
class Cds(core.BaseReader):
"""CDS format table.
See: http://vizier.u-strasbg.fr/doc/catstd.htx
Example::
Table: Table name here
= ==============================================================================
Catalog reference paper
Bibliography info here
================================================================================
ADC_Keywords: Keyword ; Another keyword ; etc
Description:
Catalog description here.
================================================================================
Byte-by-byte Description of file: datafile3.txt
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 3 I3 --- Index Running identification number
5- 6 I2 h RAh Hour of Right Ascension (J2000)
8- 9 I2 min RAm Minute of Right Ascension (J2000)
11- 15 F5.2 s RAs Second of Right Ascension (J2000)
--------------------------------------------------------------------------------
Note (1): A CDS file can contain sections with various metadata.
Notes can be multiple lines.
Note (2): Another note.
--------------------------------------------------------------------------------
1 03 28 39.09
2 04 18 24.11
**About parsing the CDS format**
The CDS format consists of a table description and the table data. These
can be in separate files as a ``ReadMe`` file plus data file(s), or
combined in a single file. Different subsections within the description
are separated by lines of dashes or equal signs ("------" or "======").
The table which specifies the column information must be preceded by a line
starting with "Byte-by-byte Description of file:".
In the case where the table description is combined with the data values,
the data must be in the last section and must be preceded by a section
delimiter line (dashes or equal signs only).
**Basic usage**
Use the ``ascii.read()`` function as normal, with an optional ``readme``
parameter indicating the CDS ReadMe file. If not supplied it is assumed that
the header information is at the top of the given table. Examples::
>>> from astropy.io import ascii
>>> table = ascii.read("data/cds.dat")
>>> table = ascii.read("data/vizier/table1.dat", readme="data/vizier/ReadMe")
>>> table = ascii.read("data/cds/multi/lhs2065.dat", readme="data/cds/multi/ReadMe")
>>> table = ascii.read("data/cds/glob/lmxbrefs.dat", readme="data/cds/glob/ReadMe")
The table name and the CDS ReadMe file can be entered as URLs. This can be used
to directly load tables from the Internet. For example, Vizier tables from the
CDS::
>>> table = ascii.read("ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/snrs.dat",
... readme="ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/ReadMe")
If the header (ReadMe) and data are stored in a single file and there
is content between the header and the data (for instance Notes), then the
parsing process may fail. In this case you can instruct the reader to
guess the actual start of the data by supplying ``data_start='guess'`` in the
call to the ``ascii.read()`` function. You should verify that the output
data table matches expectation based on the input CDS file.
**Using a reader object**
When ``Cds`` reader object is created with a ``readme`` parameter
passed to it at initialization, then when the ``read`` method is
executed with a table filename, the header information for the
specified table is taken from the ``readme`` file. An
``InconsistentTableError`` is raised if the ``readme`` file does not
have header information for the given table.
>>> readme = "data/vizier/ReadMe"
>>> r = ascii.get_reader(ascii.Cds, readme=readme)
>>> table = r.read("data/vizier/table1.dat")
>>> # table5.dat has the same ReadMe file
>>> table = r.read("data/vizier/table5.dat")
If no ``readme`` parameter is specified, then the header
information is assumed to be at the top of the given table.
>>> r = ascii.get_reader(ascii.Cds)
>>> table = r.read("data/cds.dat")
>>> #The following gives InconsistentTableError, since no
>>> #readme file was given and table1.dat does not have a header.
>>> table = r.read("data/vizier/table1.dat")
Traceback (most recent call last):
...
InconsistentTableError: No CDS section delimiter found
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = "cds"
_io_registry_format_aliases = ["cds"]
_io_registry_can_write = False
_description = "CDS format table"
data_class = CdsData
header_class = CdsHeader
def __init__(self, readme=None):
super().__init__()
self.header.readme = readme
def write(self, table=None):
"""Not available for the CDS class (raises NotImplementedError)."""
raise NotImplementedError
def read(self, table):
# If the read kwarg `data_start` is 'guess' then the table may have extraneous
# lines between the end of the header and the beginning of data.
if self.data.start_line == "guess":
# Replicate the first part of BaseReader.read up to the point where
# the table lines are initially read in.
with suppress(TypeError):
# For strings only
if os.linesep not in table + "":
self.data.table_name = os.path.basename(table)
self.data.header = self.header
self.header.data = self.data
# Get a list of the lines (rows) in the table
lines = self.inputter.get_lines(table)
# Now try increasing data.start_line by one until the table reads successfully.
# For efficiency use the in-memory list of lines instead of `table`, which
# could be a file.
for data_start in range(len(lines)):
self.data.start_line = data_start
with suppress(Exception):
table = super().read(lines)
return table
else:
return super().read(table)
|
a79dacaf155ed04bc6baf3831b9a60af6ccccadebecd9ae5adaefc1d8994995d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
basic.py:
Basic table read / write functionality for simple character
delimited files with various options for column header definition.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
class BasicHeader(core.BaseHeader):
"""
Basic table Header Reader.
Set a few defaults for common ascii table formats
(start at line 0, comments begin with ``#`` and possibly white space)
"""
start_line = 0
comment = r"\s*#"
write_comment = "# "
class BasicData(core.BaseData):
"""
Basic table Data Reader.
Set a few defaults for common ascii table formats
(start at line 1, comments begin with ``#`` and possibly white space)
"""
start_line = 1
comment = r"\s*#"
write_comment = "# "
class Basic(core.BaseReader):
r"""Character-delimited table with a single header line at the top.
Lines beginning with a comment character (default='#') as the first
non-whitespace character are comments.
Example table::
# Column definition is the first uncommented line
# Default delimiter is the space character.
apples oranges pears
# Data starts after the header column definition, blank lines ignored
1 2 3
4 5 6
"""
_format_name = "basic"
_description = "Basic table with custom delimiters"
_io_registry_format_aliases = ["ascii"]
header_class = BasicHeader
data_class = BasicData
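# Hedged usage sketch (not from the original source): reading the format defined
# above through the high-level interface; the column names are placeholders.
#
#     >>> from astropy.io import ascii
#     >>> lines = ["apples oranges pears", "1 2 3", "4 5 6"]
#     >>> t = ascii.read(lines, format="basic")
#     >>> t.colnames
#     ['apples', 'oranges', 'pears']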
class NoHeaderHeader(BasicHeader):
"""
Reader for table header without a header.
Set the start of header line number to `None`, which tells the basic
reader there is no header line.
"""
start_line = None
class NoHeaderData(BasicData):
"""
Reader for table data without a header.
Data starts at first uncommented line since there is no header line.
"""
start_line = 0
class NoHeader(Basic):
"""Character-delimited table with no header line.
When reading, columns are autonamed using header.auto_format which defaults
to "col%d". Otherwise this reader the same as the :class:`Basic` class
from which it is derived. Example::
# Table data
1 2 "hello there"
3 4 world
"""
_format_name = "no_header"
_description = "Basic table with no headers"
header_class = NoHeaderHeader
data_class = NoHeaderData
class CommentedHeaderHeader(BasicHeader):
"""
Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""
Return only lines that start with the comment regexp. For these
lines strip out the matching characters.
"""
re_comment = re.compile(self.comment)
for line in lines:
match = re_comment.match(line)
if match:
yield line[match.end() :]
def write(self, lines):
lines.append(self.write_comment + self.splitter.join(self.colnames))
class CommentedHeader(Basic):
"""Character-delimited table with column names in a comment line.
When reading, ``header_start`` can be used to specify the
line index of column names, and it can be a negative index (for example -1
for the last commented line). The default delimiter is the <space>
character.
This matches the format produced by ``np.savetxt()``, with ``delimiter=','``,
and ``header='<comma-delimited-column-names-list>'``.
Example::
# col1 col2 col3
# Comment line
1 2 3
4 5 6
"""
_format_name = "commented_header"
_description = "Column names in a commented line"
header_class = CommentedHeaderHeader
data_class = NoHeaderData
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super().read(table)
# Strip off the comment line set as the header line for
# commented_header format (first by default).
if "comments" in out.meta:
idx = self.header.start_line
if idx < 0:
idx = len(out.meta["comments"]) + idx
out.meta["comments"] = (
out.meta["comments"][:idx] + out.meta["comments"][idx + 1 :]
)
if not out.meta["comments"]:
del out.meta["comments"]
return out
def write_header(self, lines, meta):
"""
Write comment lines after, rather than before, the header.
"""
self.header.write(lines)
self.header.write_comments(lines, meta)
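# Hedged usage sketch (not from the original source): with the default
# ``header_start`` the first commented line supplies the column names; a negative
# index selects a later commented line (e.g. -1 for the last one), as described
# in the class docstring above.
#
#     >>> from astropy.io import ascii
#     >>> lines = ["# Some leading comment", "# a b c", "1 2 3"]
#     >>> t = ascii.read(lines, format="commented_header", header_start=-1)
#     >>> t.colnames
#     ['a', 'b', 'c']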
class TabHeaderSplitter(core.DefaultSplitter):
"""Split lines on tab and do not remove whitespace."""
delimiter = "\t"
def process_line(self, line):
return line + "\n"
class TabDataSplitter(TabHeaderSplitter):
"""
Don't strip data value whitespace since that is significant in TSV tables.
"""
process_val = None
skipinitialspace = False
class TabHeader(BasicHeader):
"""
Reader for header of tables with tab separated header.
"""
splitter_class = TabHeaderSplitter
class TabData(BasicData):
"""
Reader for data of tables with tab separated data.
"""
splitter_class = TabDataSplitter
class Tab(Basic):
"""Tab-separated table.
Unlike the :class:`Basic` reader, whitespace is not stripped from the
beginning and end of either lines or individual column values.
Example::
col1 <tab> col2 <tab> col3
# Comment line
1 <tab> 2 <tab> 5
"""
_format_name = "tab"
_description = "Basic table with tab-separated values"
header_class = TabHeader
data_class = TabData
class CsvSplitter(core.DefaultSplitter):
"""
Split on comma for CSV (comma-separated-value) tables.
"""
delimiter = ","
class CsvHeader(BasicHeader):
"""
Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter`.
"""
splitter_class = CsvSplitter
comment = None
write_comment = None
class CsvData(BasicData):
"""
Data that uses the :class:`astropy.io.ascii.basic.CsvSplitter`.
"""
splitter_class = CsvSplitter
fill_values = [(core.masked, "")]
comment = None
write_comment = None
class Csv(Basic):
"""CSV (comma-separated-values) table.
This file format may contain rows with fewer entries than the number of
columns, a situation that occurs in output from some spreadsheet editors.
The missing entries are marked as masked in the output table.
Masked values (indicated by an empty '' field value when reading) are
written out in the same way with an empty ('') field. This is different
from the typical default for `astropy.io.ascii` in which missing values are
indicated by ``--``.
Since the `CSV format <https://tools.ietf.org/html/rfc4180>`_ does not
formally support comments, any comments defined for the table via
``tbl.meta['comments']`` are ignored by default. If you would still like to
write those comments then include a keyword ``comment='#'`` to the
``write()`` call.
Example::
num,ra,dec,radius,mag
1,32.23222,10.1211
2,38.12321,-88.1321,2.2,17.0
"""
_format_name = "csv"
_io_registry_format_aliases = ["csv"]
_io_registry_can_write = True
_io_registry_suffix = ".csv"
_description = "Comma-separated-values"
header_class = CsvHeader
data_class = CsvData
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust row if it is too short.
If a data row is shorter than the header, add empty values to make it the
right length.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table.
"""
if len(str_vals) < ncols:
str_vals.extend((ncols - len(str_vals)) * [""])
return str_vals
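# Hedged usage sketch (not from the original source): a short row is padded with
# empty values, which then show up as masked entries in the output table.
#
#     >>> from astropy.io import ascii
#     >>> t = ascii.read(["a,b,c", "1,2", "4,5,6"], format="csv")
#     >>> t["c"].mask.tolist()
#     [True, False]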
class RdbHeader(TabHeader):
"""
Header for RDB tables.
"""
col_type_map = {"n": core.NumType, "s": core.StrType}
def get_type_map_key(self, col):
return col.raw_type[-1]
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
This is a specialized get_cols for the RDB type:
Line 0: RDB col names
Line 1: RDB col definitions
Line 2+: RDB data rows
Parameters
----------
lines : list
List of table lines
Returns
-------
None
"""
header_lines = self.process_lines(lines) # this is a generator
header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))]
if len(header_vals_list) != 2:
raise ValueError("RDB header requires 2 lines")
self.names, raw_types = header_vals_list
if len(self.names) != len(raw_types):
raise core.InconsistentTableError(
"RDB header mismatch between number of column names and column types."
)
if any(not re.match(r"\d*(N|S)$", x, re.IGNORECASE) for x in raw_types):
raise core.InconsistentTableError(
f"RDB types definitions do not all match [num](N|S): {raw_types}"
)
self._set_cols_from_names()
for col, raw_type in zip(self.cols, raw_types):
col.raw_type = raw_type
col.type = self.get_col_type(col)
def write(self, lines):
lines.append(self.splitter.join(self.colnames))
rdb_types = []
for col in self.cols:
# Check if dtype.kind is string or unicode. See help(np.core.numerictypes)
rdb_type = "S" if col.info.dtype.kind in ("S", "U") else "N"
rdb_types.append(rdb_type)
lines.append(self.splitter.join(rdb_types))
class RdbData(TabData):
"""
Data reader for RDB data. Starts reading at line 2.
"""
start_line = 2
class Rdb(Tab):
"""Tab-separated file with an extra line after the column definition line that
specifies either numeric (N) or string (S) data.
See: https://www.drdobbs.com/rdb-a-unix-command-line-database/199101326
Example::
col1 <tab> col2 <tab> col3
N <tab> S <tab> N
1 <tab> 2 <tab> 5
"""
_format_name = "rdb"
_io_registry_format_aliases = ["rdb"]
_io_registry_suffix = ".rdb"
_description = "Tab-separated with a type definition header line"
header_class = RdbHeader
data_class = RdbData
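# Hedged usage sketch (not from the original source): the second header line
# carries the N/S type codes described above; the column names are placeholders.
#
#     >>> from astropy.io import ascii
#     >>> t = ascii.read(["col1\tcol2", "N\tS", "1\thello"], format="rdb")
#     >>> t.colnames
#     ['col1', 'col2']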
|
14603332851a5c71b5f5833bbe8f1198b0b566204b77688d9fedf3d6637eed10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import copy
import csv
import fnmatch
import functools
import inspect
import itertools
import operator
import os
import re
import warnings
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
def _check_multidim_table(table, max_ndim):
"""Check that ``table`` has only columns with ndim <= ``max_ndim``.
Currently ECSV is the only built-in format that supports output of arbitrary
N-d columns, but HTML supports 2-d.
"""
# No limit?
if max_ndim is None:
return
# Check for N-d columns
nd_names = [col.info.name for col in table.itercols() if len(col.shape) > max_ndim]
if nd_names:
raise ValueError(
f"column(s) with dimension > {max_ndim} "
"cannot be be written with this format, try using 'ecsv' "
"(Enhanced CSV) format"
)
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = "2b=48Av%0-V3p>bX"
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (
dialect.delimiter == " "
)
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == "":
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == "":
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
        # If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked.
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
"""All instances of this class shall have the same hash."""
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **fill_values** : dict of fill values
* **shape** : list of element shape (default [] => scalar)
* **data** : list of converted column values
* **subtype** : actual datatype for columns serialized with JSON
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
self.shape = []
self.subtype = None
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table, newline=None):
"""Get the lines from the ``table`` input.
The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file-like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a
``read()`` method, or a list of strings.
newline :
Line separator. If `None` use OS default from ``splitlines()``.
Returns
-------
lines : list
List of lines
"""
try:
if hasattr(table, "read") or (
"\n" not in table + "" and "\r" not in table + ""
):
with get_readable_fileobj(table, encoding=self.encoding) as fileobj:
table = fileobj.read()
if newline is None:
lines = table.splitlines()
else:
lines = table.split(newline)
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
if len(table) > 1:
lines = table
else:
# treat single entry as if string had been passed directly
if newline is None:
lines = table[0].splitlines()
else:
lines = table[0].split(newline)
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable'
)
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines.
"""
return lines
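# Hedged usage sketch (not from the original source): get_lines accepts a
# filename, a newline-separated string, a file-like object, or a list of strings,
# and always hands back a list of lines.
#
#     >>> BaseInputter().get_lines("a b c\n1 2 3")
#     ['a b c', '1 2 3']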
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
"""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = " "
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = " "
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first.
"""
if self.delimiter == r"\s":
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip() + "\n"
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip(" \t")
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Yields
------
line : list of str
Each line's split values.
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = " " if self.delimiter == r"\s" else self.delimiter
csv_reader = csv.reader(
lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace,
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = " " if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
)
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals).rstrip("\r\n")
return out
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings.
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = "NONE"
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if hasattr(line_or_func, "__call__"):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
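# Illustrative examples (not from the original source): non-negative indices pass
# through, negative indices count from the end, and None stays None.
#
#     >>> lines = ["a", "b", "c"]
#     >>> _get_line_index(1, lines), _get_line_index(-1, lines)
#     (1, 2)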
class BaseHeader:
"""
Base table header reader.
"""
auto_format = "col{}"
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [
re.sub("^" + self.comment, "", x).strip() for x in comment_lines
]
if comment_lines:
meta.setdefault("table", {})["comments"] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError(
"No data lines found so cannot autogenerate column names"
)
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i) for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError("No header line found in table")
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines."""
re_comment = re.compile(self.comment) if self.comment else None
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment not in (False, None):
for comment in meta.get("comments", []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(
range(self.start_line), itertools.cycle(self.write_spacer_lines)
):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table."""
return tuple(
col.name if isinstance(col, Column) else col.info.name for col in self.cols
)
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name, new_name):
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f"got column type {type(col)} instead of required {Column}")
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError(
                f'Unknown data type "{col.raw_type}" for column "{col.name}"'
)
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (
_is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads
):
raise InconsistentTableError(
f"Column name {name!r} does not meet strict name requirements"
)
# When guessing require at least two columns, except for ECSV which can
# reliably be guessed from the header requirements.
if (
guessing
and len(self.colnames) <= 1
and self.__class__.__name__ != "EcsvHeader"
):
raise ValueError(
"Table format guessing requires at least two columns, got {}".format(
list(self.colnames)
)
)
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
"Length of names argument ({}) does not match number"
" of table columns ({})".format(len(names), len(self.colnames))
)
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, "")]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
READ: Strip out comment lines and blank lines from list of ``lines``.
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""
READ: Set ``data_lines`` attribute to lines slice comprising table data values.
"""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line.
"""
return self.splitter(self.data_lines)
def masks(self, cols):
"""READ: Set fill value for each column and then apply that fill value.
        In the first step it is determined which columns each entry in ``fill_values``
        applies to, using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""READ, WRITE: Set fill values of individual cols based on fill_values of BaseData.
        fill_values has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, "fill_values"):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ""
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
            # iterate in reversed order, so the last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError(
"Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)"
)
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in (
(i, x)
for i, x in enumerate(self.header.colnames)
if x in affect_cols
):
cols[i].fill_values[replacement[0]] = str(replacement[1])
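    # Hedged usage sketch (not from the original source): the fill_values spec
    # described in the docstring above is normally supplied through the
    # high-level reader; the column names and bad value below are placeholders.
    #
    #     >>> from astropy.io import ascii
    #     >>> t = ascii.read(["a b", "1 --", "2 3"], format="basic",
    #     ...                fill_values=[("--", "0", "b")])
    #     >>> t["b"].mask.tolist()   # "--" replaced by "0" and masked, only in column "b"
    #     [True, False]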
def _set_masks(self, cols):
"""READ: Replace string values in col.str_vals and set masks."""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=bool)
for i, str_val in (
(i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values
):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""WRITE: replace string values in col.str_vals."""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in (
(i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values
):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, "mask"):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings.
This sets the fill values and possibly column formats from the input
formats={} keyword, then ends up calling table.pprint._pformat_col_iter()
by a circuitous path. That function does the real work of formatting.
Finally replace anything matching the fill_values.
Returns
-------
values : list of list of str
"""
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
"""Write ``self.cols`` in place to ``lines``.
Parameters
----------
lines : list
List for collecting output of writing self.cols.
"""
if hasattr(self.start_line, "__call__"):
raise TypeError("Start_line attribute cannot be callable for write()")
else:
data_start_line = self.start_line or 0
while len(lines) < data_start_line:
lines.append(itertools.cycle(self.write_spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""WRITE: set column formats."""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_
(e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python
type covered by a numpy type (e.g., int, float, str, bool).
Returns
-------
converter : callable
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
converter_type : type
``converter_type`` tracks the generic data type produced by the
converter function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
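    Examples
    --------
    A minimal illustrative sketch (not part of the original source; the exact
    numpy ``repr`` of the array may vary by version)::
        converter, converter_type = convert_numpy(float)
        arr = converter(["1", "2.5"])    # numpy array([1. , 2.5])
        assert converter_type is FloatType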
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if "int" in type_name:
converter_type = IntType
elif "float" in type_name:
converter_type = FloatType
elif "bool" in type_name:
converter_type = BoolType
elif "str" in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all(
(svals == "False") | (svals == "True") | (svals == "0") | (svals == "1")
):
raise ValueError('bool input strings must be False, True, 0, 1, or ""')
vals = numpy.asarray(vals)
trues = (vals == "True") | (vals == "1")
falses = (vals == "False") | (vals == "0")
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False, True, 0, 1, or ""')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
    # User-defined converters which get set in ascii.ui if a `converters` kwarg
    # is supplied.
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type).
"""
# Allow specifying a single converter instead of a list of converters.
# The input `converters` must be a ``type`` value that can init np.dtype.
try:
# Don't allow list-like things that dtype accepts
assert type(converters) is type
converters = [numpy.dtype(converters)]
except (AssertionError, TypeError):
pass
converters_out = []
try:
for converter in converters:
try:
converter_func, converter_type = converter
except TypeError as err:
if str(err).startswith("cannot unpack"):
converter_func, converter_type = convert_numpy(converter)
else:
raise
if not issubclass(converter_type, NoType):
raise ValueError("converter_type must be a subclass of NoType")
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError) as err:
raise ValueError(
"Error: invalid format for converters, see "
f"documentation\n{converters}: {err}"
)
return converters_out
def _convert_vals(self, cols):
for col in cols:
for key, converters in self.converters.items():
if fnmatch.fnmatch(col.name, key):
break
else:
if col.dtype is not None:
converters = [convert_numpy(col.dtype)]
else:
converters = self.default_converters
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
            # value of last_err will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = "no converters defined"
while not hasattr(col, "data"):
# Try converters, popping the unsuccessful ones from the list.
# If there are no converters left here then fail.
if not col.converters:
raise ValueError(f"Column {col.name} failed to convert: {last_err}")
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError("converter type does not match column type")
try:
col.data = converter_func(col.str_vals)
col.type = converter_type
except (OverflowError, TypeError, ValueError) as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
# With python/cpython#95778 this has been supplemented with a
# "ValueError: Exceeds the limit (4300) for integer string conversion"
# so need to catch that as well.
if isinstance(err, OverflowError) or (
isinstance(err, ValueError)
and str(err).startswith("Exceeds the limit")
):
warnings.warn(
f"OverflowError converting to {converter_type.__name__} in"
f" column {col.name}, reverting to String.",
AstropyWarning,
)
col.converters.insert(0, convert_numpy(str))
else:
col.converters.pop(0)
last_err = err
def _deduplicate_names(names):
"""Ensure there are no duplicates in ``names``.
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
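    For example (illustrative), ``["a", "a", "b", "a"]`` becomes
    ``["a", "a_1", "b", "a_2"]``.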
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + "_"
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(int), convert_numpy(float), convert_numpy(str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [
numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, "mask") and numpy.any(x.mask)
else x.data
for x in cols
]
out = Table(t_cols, names=[x.name for x in cols], meta=meta["table"])
for col, out_col in zip(cols, out.columns.values()):
for attr in ("format", "unit", "description"):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, "meta"):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get("_format_name")
if format is None:
return
fast = dct.get("_fast")
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ["ascii." + format] + dct.get("_io_registry_format_aliases", [])
if dct.get("_io_registry_suffix"):
func = functools.partial(connect.io_identify, dct["_io_registry_suffix"])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (
inspect.cleandoc(READ_DOCSTRING).strip()
+ "\n\n"
+ header
+ re.sub(".", "=", header)
+ "\n"
)
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get("_io_registry_can_write", True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (
inspect.cleandoc(WRITE_DOCSTRING).strip()
+ "\n\n"
+ header
+ re.sub(".", "=", header)
+ "\n"
)
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table or BaseHeader.
For the latter this relies on BaseHeader implementing ``colnames``,
``rename_column``, and ``remove_columns``.
Parameters
----------
table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader`
Input table or BaseHeader subclass instance
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
"""
def rename_columns(table, names):
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = "x" * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
if names is not None:
rename_columns(table, names)
else:
colnames_uniq = _deduplicate_names(table.colnames)
if colnames_uniq != list(table.colnames):
rename_columns(table, colnames_uniq)
names_set = set(table.colnames)
if include_names is not None:
names_set.intersection_update(include_names)
if exclude_names is not None:
names_set.difference_update(exclude_names)
if names_set != set(table.colnames):
remove_names = set(table.colnames) - names_set
table.remove_columns(remove_names)
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
# Max column dimension that writer supports for this format. Exceptions
# include ECSV (no limit) and HTML (max_ndim=2).
max_ndim = 1
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
        # need to know about header (e.g. for fixed-width tables where widths are spec'd in the header).
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(), cols=OrderedDict())
def _check_multidim_table(self, table):
"""Check that the dimensions of columns in ``table`` are acceptable.
The reader class attribute ``max_ndim`` defines the maximum dimension of
columns that can be written using this format. The base value is ``1``,
corresponding to normal scalar columns with just a length.
Parameters
----------
table : `~astropy.table.Table`
Input table.
Raises
------
ValueError
If any column exceeds the number of allowed dimensions
"""
_check_multidim_table(table, self.max_ndim)
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file-like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
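        Examples
        --------
        A minimal sketch using the public helper from ``astropy.io.ascii``
        (assumes the standard ``Basic`` reader)::
            from astropy.io import ascii
            reader = ascii.get_reader(Reader=ascii.Basic)
            dat = reader.read(["col1 col2", "1 2", "3 4"])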
"""
# If ``table`` is a file then store the name in the ``data``
        # attribute. The ``table`` is a "file" if it is a string
        # that does not contain the OS-specific newline.
with suppress(TypeError):
# Strings only
if os.linesep not in table + "":
self.data.table_name = os.path.basename(table)
# If one of the newline chars is set as field delimiter, only
# accept the other one as line splitter
if self.header.splitter.delimiter == "\n":
newline = "\r"
elif self.header.splitter.delimiter == "\r":
newline = "\n"
else:
newline = None
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table, newline=newline)
        # Set self.data.data_lines to a slice of lines containing the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = (
"Number of header columns ({}) inconsistent with"
" data columns ({}) at data line {}\n"
"Header values: {}\n"
"Data values: {}".format(
n_cols, len(str_vals), i, [x.name for x in cols], str_vals
)
)
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, "table_meta"):
self.meta["table"].update(self.header.table_meta)
_apply_include_exclude_names(
self.header, self.names, self.include_names, self.exclude_names
)
table = self.outputter(self.header.cols, self.meta)
self.cols = self.header.cols
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
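        For example, a subclass could silently skip any row whose length does
        not match the header (an illustrative sketch, not part of the base
        implementation)::
            def inconsistent_handler(self, str_vals, ncols):
                return str_vals if len(str_vals) == ncols else None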
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp."""
if not hasattr(self, "lines"):
raise ValueError(
"Table must be read prior to accessing the header comment lines"
)
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(
table, self.names, self.include_names, self.exclude_names
)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Check that table column dimensions are supported by this format class.
# Most formats support only 1-d columns, but some like ECSV support N-d.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined with the subsequent line.
Example::
col1 col2 col3
1 \
2 3
4 5 \
6
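    After joining, the continued lines above are read as the two data rows
    ``1 2 3`` and ``4 5 6``.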
"""
continuation_char = "\\"
replace_char = " "
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append("".join(parts))
parts = []
return outlines
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings."""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (
self.escapechar is None or lastchar != self.escapechar
):
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
extra_reader_pars = (
"Reader",
"Inputter",
"Outputter",
"delimiter",
"comment",
"quotechar",
"header_start",
"data_start",
"data_end",
"converters",
"encoding",
"data_Splitter",
"header_Splitter",
"names",
"include_names",
"exclude_names",
"strict_names",
"fill_values",
"fill_include_names",
"fill_exclude_names",
)
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs["Inputter"] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if "fast_reader" in kwargs:
if kwargs["fast_reader"]["enable"] == "force":
raise ParameterError(
"fast_reader required with "
"{}, but this is not a fast C reader: {}".format(
kwargs["fast_reader"], Reader
)
)
else:
del kwargs["fast_reader"] # Otherwise ignore fast_reader parameter
reader_kwargs = {k: v for k, v in kwargs.items() if k not in extra_reader_pars}
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested to set data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
# csv.reader is hard-coded to recognise either '\r' or '\n' as end-of-line,
# therefore DefaultSplitter cannot handle these as delimiters.
if "delimiter" in kwargs:
if kwargs["delimiter"] in ("\n", "\r", "\r\n"):
reader.header.splitter = BaseSplitter()
reader.data.splitter = BaseSplitter()
reader.header.splitter.delimiter = kwargs["delimiter"]
reader.data.splitter.delimiter = kwargs["delimiter"]
if "comment" in kwargs:
reader.header.comment = kwargs["comment"]
reader.data.comment = kwargs["comment"]
if "quotechar" in kwargs:
reader.header.splitter.quotechar = kwargs["quotechar"]
reader.data.splitter.quotechar = kwargs["quotechar"]
if "data_start" in kwargs:
reader.data.start_line = kwargs["data_start"]
if "data_end" in kwargs:
reader.data.end_line = kwargs["data_end"]
if "header_start" in kwargs:
if reader.header.start_line is not None:
reader.header.start_line = kwargs["header_start"]
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
            # However, position_line is given as an absolute number and not relative to header_start.
# So, ignore this Reader here.
if (
("data_start" not in kwargs)
and (default_header_length is not None)
and reader._format_name
not in ["fixed_width_two_line", "commented_header"]
):
reader.data.start_line = (
reader.header.start_line + default_header_length
)
elif kwargs["header_start"] is not None:
# User trying to set a None header start to some value other than None
raise ValueError("header_start cannot be modified for this Reader")
if "converters" in kwargs:
reader.outputter.converters = kwargs["converters"]
if "data_Splitter" in kwargs:
reader.data.splitter = kwargs["data_Splitter"]()
if "header_Splitter" in kwargs:
reader.header.splitter = kwargs["header_Splitter"]()
if "names" in kwargs:
reader.names = kwargs["names"]
if None in reader.names:
raise TypeError("Cannot have None for column name")
if len(set(reader.names)) != len(reader.names):
raise ValueError("Duplicate column names")
if "include_names" in kwargs:
reader.include_names = kwargs["include_names"]
if "exclude_names" in kwargs:
reader.exclude_names = kwargs["exclude_names"]
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if "strict_names" in kwargs:
reader.strict_names = kwargs["strict_names"]
if "fill_values" in kwargs:
reader.data.fill_values = kwargs["fill_values"]
if "fill_include_names" in kwargs:
reader.data.fill_include_names = kwargs["fill_include_names"]
if "fill_exclude_names" in kwargs:
reader.data.fill_exclude_names = kwargs["fill_exclude_names"]
if "encoding" in kwargs:
reader.encoding = kwargs["encoding"]
reader.inputter.encoding = kwargs["encoding"]
return reader
extra_writer_pars = (
"delimiter",
"comment",
"quotechar",
"formats",
"strip_whitespace",
"names",
"include_names",
"exclude_names",
"fill_values",
"fill_include_names",
"fill_exclude_names",
)
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module.
"""
from .fastbasic import FastBasic
    # A value of None for fill_values implies getting the default string
    # representation of masked values (depending on the writer class), but the
    # machinery expects a list. The easiest approach here is to just pop the
    # value off, i.e. fill_values=None is the same as not providing it at all.
if "fill_values" in kwargs and kwargs["fill_values"] is None:
del kwargs["fill_values"]
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f"fast_{Writer._format_name}" in FAST_CLASSES:
# Switch to fast writer
kwargs["fast_writer"] = fast_writer
return FAST_CLASSES[f"fast_{Writer._format_name}"](**kwargs)
writer_kwargs = {k: v for k, v in kwargs.items() if k not in extra_writer_pars}
writer = Writer(**writer_kwargs)
if "delimiter" in kwargs:
writer.header.splitter.delimiter = kwargs["delimiter"]
writer.data.splitter.delimiter = kwargs["delimiter"]
if "comment" in kwargs:
writer.header.write_comment = kwargs["comment"]
writer.data.write_comment = kwargs["comment"]
if "quotechar" in kwargs:
writer.header.splitter.quotechar = kwargs["quotechar"]
writer.data.splitter.quotechar = kwargs["quotechar"]
if "formats" in kwargs:
writer.data.formats = kwargs["formats"]
if "strip_whitespace" in kwargs:
if kwargs["strip_whitespace"]:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller("strip", " \t")
else:
writer.data.splitter.process_val = None
if "names" in kwargs:
writer.header.names = kwargs["names"]
if "include_names" in kwargs:
writer.include_names = kwargs["include_names"]
if "exclude_names" in kwargs:
writer.exclude_names = kwargs["exclude_names"]
if "fill_values" in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs["fill_values"][1] + ""
kwargs["fill_values"] = [kwargs["fill_values"]]
writer.data.fill_values = kwargs["fill_values"] + writer.data.fill_values
if "fill_include_names" in kwargs:
writer.data.fill_include_names = kwargs["fill_include_names"]
if "fill_exclude_names" in kwargs:
writer.data.fill_exclude_names = kwargs["fill_exclude_names"]
return writer
|
aea9d8a4080b8ddda11ed9ac1f81d783b4877fcaec6e985bb77a54931d531ecc | # Licensed under a 3-clause BSD style license
"""
:Author: Simon Gibbons ([email protected]).
"""
from .core import DefaultSplitter
from .fixedwidth import (
FixedWidth,
FixedWidthData,
FixedWidthHeader,
FixedWidthTwoLineDataSplitter,
)
class SimpleRSTHeader(FixedWidthHeader):
position_line = 0
start_line = 1
splitter_class = DefaultSplitter
position_char = "="
def get_fixedwidth_params(self, line):
vals, starts, ends = super().get_fixedwidth_params(line)
# The right hand column can be unbounded
ends[-1] = None
return vals, starts, ends
class SimpleRSTData(FixedWidthData):
end_line = -1
splitter_class = FixedWidthTwoLineDataSplitter
class RST(FixedWidth):
"""reStructuredText simple format table.
See: https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#simple-tables
Example::
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> import sys
>>> tbl = QTable({"wave": [350, 950] * u.nm, "response": [0.7, 1.2] * u.count})
>>> tbl.write(sys.stdout, format="ascii.rst")
===== ========
wave response
===== ========
350.0 0.7
950.0 1.2
===== ========
Like other fixed-width formats, when writing a table you can provide ``header_rows``
to specify a list of table rows to output as the header. For example::
>>> tbl.write(sys.stdout, format="ascii.rst", header_rows=['name', 'unit'])
===== ========
wave response
nm ct
===== ========
350.0 0.7
950.0 1.2
===== ========
Currently there is no support for reading tables which utilize continuation lines,
or for ones which define column spans through the use of an additional
line of dashes in the header.
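    Reading the first example back uses the same format name (a minimal
    sketch; ``lines`` stands for that table text)::
        from astropy.io import ascii
        dat = ascii.read(lines, format="rst")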
"""
_format_name = "rst"
_description = "reStructuredText simple table"
data_class = SimpleRSTData
header_class = SimpleRSTHeader
def __init__(self, header_rows=None):
super().__init__(delimiter_pad=None, bookend=False, header_rows=header_rows)
def write(self, lines):
lines = super().write(lines)
idx = len(self.header.header_rows)
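        # super().write() produces the header row(s), a single "==== ===="
        # separator line (at index len(header_rows)), then the data rows; a
        # reStructuredText simple table needs that separator as its first and
        # last line as well, so duplicate it at both ends.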
lines = [lines[idx]] + lines + [lines[idx]]
return lines
def read(self, table):
self.data.start_line = 2 + len(self.header.header_rows)
return super().read(table)
|
d81c5bbd7e24232c0be88075c2bdcc4172110bcee561f8d6f01a3d2793e873c1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
fixedwidth.py:
Read or write a table with fixed width columns.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
from . import basic, core
from .core import DefaultSplitter, InconsistentTableError
class FixedWidthSplitter(core.BaseSplitter):
"""
Split line based on fixed start and end positions for each ``col`` in
``self.cols``.
This class requires that the Header class will have defined ``col.start``
and ``col.end`` for each column. The reference to the ``header.cols`` gets
put in the splitter object by the base Reader.read() function just in time
for splitting data lines by a ``data`` object.
Note that the ``start`` and ``end`` positions are defined in the pythonic
style so line[start:end] is the desired substring for a column. This splitter
class does not have a hook for ``process_lines`` since that is generally not
useful for fixed-width input.
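    For example, with the class defaults below (``delimiter_pad=""``,
    ``bookend=False``, ``delimiter="|"``), ``join(["a", "bb"], [3, 4])``
    returns ``"  a|  bb"``.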
"""
delimiter_pad = ""
bookend = False
delimiter = "|"
def __call__(self, lines):
for line in lines:
vals = [line[x.start : x.end] for x in self.cols]
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals, widths):
pad = self.delimiter_pad or ""
delimiter = self.delimiter or ""
padded_delim = pad + delimiter + pad
if self.bookend:
bookend_left = delimiter + pad
bookend_right = pad + delimiter
else:
bookend_left = ""
bookend_right = ""
vals = [" " * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
class FixedWidthHeaderSplitter(DefaultSplitter):
"""Splitter class that splits on ``|``."""
delimiter = "|"
class FixedWidthHeader(basic.BasicHeader):
"""
Fixed width table header reader.
"""
splitter_class = FixedWidthHeaderSplitter
""" Splitter class for splitting data lines into columns """
position_line = None # secondary header line position
""" row index of line that specifies position (default = 1) """
set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'")
def get_line(self, lines, index):
for i, line in enumerate(self.process_lines(lines)):
if i == index:
break
else: # No header line matching
raise InconsistentTableError("No header line found in table")
return line
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
header_rows = getattr(self, "header_rows", ["name"])
# See "else" clause below for explanation of start_line and position_line
start_line = core._get_line_index(self.start_line, self.process_lines(lines))
position_line = core._get_line_index(
self.position_line, self.process_lines(lines)
)
# If start_line is none then there is no header line. Column positions are
# determined from first data line and column names are either supplied by user
# or auto-generated.
if start_line is None:
if position_line is not None:
raise ValueError(
"Cannot set position_line without also setting header_start"
)
# data.data_lines attribute already set via self.data.get_data_lines(lines)
# in BaseReader.read(). This includes slicing for data_start / data_end.
data_lines = self.data.data_lines
if not data_lines:
raise InconsistentTableError(
"No data lines found so cannot autogenerate column names"
)
vals, starts, ends = self.get_fixedwidth_params(data_lines[0])
self.names = [self.auto_format.format(i) for i in range(1, len(vals) + 1)]
else:
# This bit of code handles two cases:
# start_line = <index> and position_line = None
# Single header line where that line is used to determine both the
# column positions and names.
# start_line = <index> and position_line = <index2>
# Two header lines where the first line defines the column names and
# the second line defines the column positions
if position_line is not None:
# Define self.col_starts and self.col_ends so that the call to
# get_fixedwidth_params below will use those to find the header
# column names. Note that get_fixedwidth_params returns Python
# slice col_ends but expects inclusive col_ends on input (for
# more intuitive user interface).
line = self.get_line(lines, position_line)
if len(set(line) - {self.splitter.delimiter, " "}) != 1:
raise InconsistentTableError(
"Position line should only contain delimiters and "
'one other character, e.g. "--- ------- ---".'
)
# The line above lies. It accepts white space as well.
# We don't want to encourage using three different
# characters, because that can cause ambiguities, but white
# spaces are so common everywhere that practicality beats
# purity here.
charset = self.set_of_position_line_characters.union(
{self.splitter.delimiter, " "}
)
if not set(line).issubset(charset):
raise InconsistentTableError(
f"Characters in position line must be part of {charset}"
)
vals, self.col_starts, col_ends = self.get_fixedwidth_params(line)
self.col_ends = [x - 1 if x is not None else None for x in col_ends]
# Get the column names from the header line
line = self.get_line(lines, start_line + header_rows.index("name"))
self.names, starts, ends = self.get_fixedwidth_params(line)
self._set_cols_from_names()
for ii, attr in enumerate(header_rows):
if attr != "name":
line = self.get_line(lines, start_line + ii)
vals = self.get_fixedwidth_params(line)[0]
for col, val in zip(self.cols, vals):
if val:
setattr(col, attr, val)
# Set column start and end positions.
for i, col in enumerate(self.cols):
col.start = starts[i]
col.end = ends[i]
def get_fixedwidth_params(self, line):
"""
Split ``line`` on the delimiter and determine column values and
column start and end positions. This might include null columns with
zero length (e.g. for ``header row = "| col1 || col2 | col3 |"`` or
``header2_row = "----- ------- -----"``). The null columns are
stripped out. Returns the values between delimiters and the
corresponding start and end positions.
Parameters
----------
line : str
Input line
Returns
-------
vals : list
List of values.
starts : list
List of starting indices.
ends : list
List of ending indices.
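        For example, with the default ``|`` delimiter and neither
        ``col_starts`` nor ``col_ends`` set, the line ``"| col1 | col2 |"``
        yields ``vals = ['col1', 'col2']``, ``starts = [1, 8]`` and
        ``ends = [7, 14]``.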
"""
# If column positions are already specified then just use those.
# If neither column starts or ends are given, figure out positions
# between delimiters. Otherwise, either the starts or the ends have
# been given, so figure out whichever wasn't given.
if self.col_starts is not None and self.col_ends is not None:
starts = list(self.col_starts) # could be any iterable, e.g. np.array
# user supplies inclusive endpoint
ends = [x + 1 if x is not None else None for x in self.col_ends]
if len(starts) != len(ends):
raise ValueError(
"Fixed width col_starts and col_ends must have the same length"
)
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
elif self.col_starts is None and self.col_ends is None:
# There might be a cleaner way to do this but it works...
vals = line.split(self.splitter.delimiter)
starts = [0]
ends = []
for val in vals:
if val:
ends.append(starts[-1] + len(val))
starts.append(ends[-1] + 1)
else:
starts[-1] += 1
starts = starts[:-1]
vals = [x.strip() for x in vals if x]
if len(vals) != len(starts) or len(vals) != len(ends):
raise InconsistentTableError("Error parsing fixed width header")
else:
# exactly one of col_starts or col_ends is given...
if self.col_starts is not None:
starts = list(self.col_starts)
ends = starts[1:] + [None] # Assume each col ends where the next starts
else: # self.col_ends is not None
ends = [x + 1 for x in self.col_ends]
starts = [0] + ends[:-1] # Assume each col starts where the last ended
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
return vals, starts, ends
def write(self, lines):
# Header line not written until data are formatted. Until then it is
# not known how wide each column will be for fixed width.
pass
class FixedWidthData(basic.BasicData):
"""
Base table data reader.
"""
splitter_class = FixedWidthSplitter
""" Splitter class for splitting data lines into columns """
start_line = None
def write(self, lines):
default_header_rows = [] if self.header.start_line is None else ["name"]
header_rows = getattr(self, "header_rows", default_header_rows)
# First part is getting the widths of each column.
# List (rows) of list (column values) for data lines
vals_list = []
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
# List (rows) of list (columns values) for header lines.
hdrs_list = []
for col_attr in header_rows:
vals = [
"" if (val := getattr(col.info, col_attr)) is None else str(val)
for col in self.cols
]
hdrs_list.append(vals)
# Widths for data columns
widths = [
max(len(vals[i_col]) for vals in vals_list)
for i_col in range(len(self.cols))
]
# Incorporate widths for header columns (if there are any)
if hdrs_list:
for i_col in range(len(self.cols)):
widths[i_col] = max(
widths[i_col], max(len(vals[i_col]) for vals in hdrs_list)
)
# Now collect formatted header and data lines into the output lines
for vals in hdrs_list:
lines.append(self.splitter.join(vals, widths))
if self.header.position_line is not None:
vals = [self.header.position_char * width for width in widths]
lines.append(self.splitter.join(vals, widths))
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class FixedWidth(basic.Basic):
"""Fixed width table with single header line defining column names and positions.
Examples::
# Bar delimiter in header and data
| Col1 | Col2 | Col3 |
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Bar delimiter in header only
Col1 | Col2 | Col3
1.2 hello there 3
2.4 many words 7
# No delimiter with column positions specified as input
Col1 Col2Col3
1.2hello there 3
2.4many words 7
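    A minimal read sketch for the first example above (``table_text`` stands
    for that table text)::
        from astropy.io import ascii
        dat = ascii.read(table_text, format="fixed_width")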
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width"
_description = "Fixed width"
header_class = FixedWidthHeader
data_class = FixedWidthData
def __init__(
self,
col_starts=None,
col_ends=None,
delimiter_pad=" ",
bookend=True,
header_rows=None,
):
if header_rows is None:
header_rows = ["name"]
super().__init__()
self.data.splitter.delimiter_pad = delimiter_pad
self.data.splitter.bookend = bookend
self.header.col_starts = col_starts
self.header.col_ends = col_ends
self.header.header_rows = header_rows
self.data.header_rows = header_rows
if self.data.start_line is None:
self.data.start_line = len(header_rows)
class FixedWidthNoHeaderHeader(FixedWidthHeader):
"""Header reader for fixed with tables with no header line."""
start_line = None
class FixedWidthNoHeaderData(FixedWidthData):
"""Data reader for fixed width tables with no header line."""
start_line = 0
class FixedWidthNoHeader(FixedWidth):
"""Fixed width table which has no header line.
When reading, column names are either input (``names`` keyword) or
auto-generated. Column positions are determined either by input
    (``col_starts`` and ``col_ends`` keywords) or by splitting the first data
line. In the latter case a ``delimiter`` is required to split the data
line.
Examples::
# Bar delimiter in header and data
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Compact table having no delimiter and column positions specified as input
1.2hello there3
2.4many words 7
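    A minimal read sketch for the compact example above (``table_text`` stands
    for that text; the column positions are illustrative)::
        from astropy.io import ascii
        dat = ascii.read(table_text, format="fixed_width_no_header",
                         names=("Col1", "Col2", "Col3"), col_starts=(0, 3, 14))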
This class is just a convenience wrapper around the ``FixedWidth`` reader
but with ``header_start=None`` and ``data_start=0``.
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width_no_header"
_description = "Fixed width with no header"
header_class = FixedWidthNoHeaderHeader
data_class = FixedWidthNoHeaderData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=" ", bookend=True):
super().__init__(
col_starts,
col_ends,
delimiter_pad=delimiter_pad,
bookend=bookend,
header_rows=[],
)
class FixedWidthTwoLineHeader(FixedWidthHeader):
"""Header reader for fixed width tables splitting on whitespace.
For fixed width tables with several header lines, there is typically
a white-space delimited format line, so splitting on white space is
needed.
"""
splitter_class = DefaultSplitter
class FixedWidthTwoLineDataSplitter(FixedWidthSplitter):
"""Splitter for fixed width tables splitting on ``' '``."""
delimiter = " "
class FixedWidthTwoLineData(FixedWidthData):
"""Data reader for fixed with tables with two header lines."""
splitter_class = FixedWidthTwoLineDataSplitter
class FixedWidthTwoLine(FixedWidth):
"""Fixed width table which has two header lines.
The first header line defines the column names and the second implicitly
defines the column positions.
Examples::
# Typical case with column extent defined by ---- under column names.
col1 col2 <== header_start = 0
----- ------------ <== position_line = 1, position_char = "-"
1 bee flies <== data_start = 2
2 fish swims
# Pretty-printed table
+------+------------+
| Col1 | Col2 |
+------+------------+
| 1.2 | "hello" |
| 2.4 | there world|
+------+------------+
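    A minimal write sketch using a different position character (``dat`` is
    any table)::
        from astropy.io import ascii
        import sys
        ascii.write(dat, sys.stdout, format="fixed_width_two_line", position_char="=")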
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width_two_line"
_description = "Fixed width with second header line"
data_class = FixedWidthTwoLineData
header_class = FixedWidthTwoLineHeader
def __init__(
self,
position_line=None,
position_char="-",
delimiter_pad=None,
bookend=False,
header_rows=None,
):
if len(position_char) != 1:
raise ValueError(
f'Position_char="{position_char}" must be a single character'
)
super().__init__(
delimiter_pad=delimiter_pad, bookend=bookend, header_rows=header_rows
)
if position_line is None:
position_line = len(self.header.header_rows)
self.header.position_line = position_line
self.header.position_char = position_char
self.data.start_line = position_line + 1
|
bdcd10f8ae3dcbbee8b0114a8c15240f42408af0d1aa6f57f776baf230854a89 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Classes to read AAS MRT table format.
Ref: https://journals.aas.org/mrt-standards
:Copyright: Smithsonian Astrophysical Observatory (2021)
:Author: Tom Aldcroft ([email protected]), \
Suyog Garg ([email protected])
"""
import re
import warnings
from io import StringIO
from math import ceil, floor
from string import Template
from textwrap import wrap
import numpy as np
from astropy import units as u
from astropy.table import Column, MaskedColumn, Table
from . import cds, core, fixedwidth
MAX_SIZE_README_LINE = 80
MAX_COL_INTLIMIT = 100000
__doctest_skip__ = ["*"]
BYTE_BY_BYTE_TEMPLATE = [
"Byte-by-byte Description of file: $file",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
"$bytebybyte",
"--------------------------------------------------------------------------------",
]
MRT_TEMPLATE = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"$bytebybyte",
"Notes:",
"--------------------------------------------------------------------------------",
]
class MrtSplitter(fixedwidth.FixedWidthSplitter):
"""
Contains the join function to left align the MRT columns
when writing to a file.
"""
def join(self, vals, widths):
vals = [val + " " * (width - len(val)) for val, width in zip(vals, widths)]
return self.delimiter.join(vals)
class MrtHeader(cds.CdsHeader):
_subfmt = "MRT"
def _split_float_format(self, value):
"""
Splits a Float string into different parts to find number
of digits after decimal and check if the value is in Scientific
notation.
Parameters
----------
value : str
String containing the float value to split.
Returns
-------
fmt: (int, int, int, bool, bool)
List of values describing the Float string.
            (size, ent, dec, sign, exp)
            size, length of the given string.
            ent, number of digits before the decimal point.
            dec, number of digits after the decimal point.
            sign, whether or not the given value is signed.
            exp, whether the value is in scientific notation.
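            For example (illustrative), ``"-12.345"`` gives
            ``(7, 2, 3, True, False)`` and ``"1.5e-10"`` gives
            ``(7, 1, 1, False, True)``.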
"""
regfloat = re.compile(
r"""(?P<sign> [+-]*)
(?P<ent> [^eE.]+)
(?P<deciPt> [.]*)
(?P<decimals> [0-9]*)
(?P<exp> [eE]*-*)[0-9]*""",
re.VERBOSE,
)
mo = regfloat.match(value)
if mo is None:
raise Exception(f"{value} is not a float number")
return (
len(value),
len(mo.group("ent")),
len(mo.group("decimals")),
mo.group("sign") != "",
mo.group("exp") != "",
)
def _set_column_val_limits(self, col):
"""
Sets the ``col.min`` and ``col.max`` column attributes,
taking into account columns with Null values.
"""
col.max = max(col)
col.min = min(col)
if col.max is np.ma.core.MaskedConstant:
col.max = None
if col.min is np.ma.core.MaskedConstant:
col.min = None
def column_float_formatter(self, col):
"""
String formatter function for a column containing Float values.
Checks if the values in the given column are in Scientific notation,
by splitting the value string. It is assumed that the column either has
float values or Scientific notation.
A ``col.formatted_width`` attribute is added to the column. It is not added
if such an attribute is already present, say when the ``formats`` argument
is passed to the writer. A properly formatted format string is also added as
the ``col.format`` attribute.
Parameters
----------
col : A ``Table.Column`` object.
"""
# maxsize: maximum length of string containing the float value.
        # maxent: maximum number of digit places before the decimal point.
        # maxdec: maximum number of digit places after the decimal point.
# maxprec: maximum precision of the column values, sum of maxent and maxdec.
maxsize, maxprec, maxent, maxdec = 1, 0, 1, 0
sign = False
fformat = "F"
# Find maximum sized value in the col
for val in col.str_vals:
# Skip null values
if val is None or val == "":
continue
# Find format of the Float string
fmt = self._split_float_format(val)
# If value is in Scientific notation
if fmt[4] is True:
# if the previous column value was in normal Float format
# set maxsize, maxprec and maxdec to default.
if fformat == "F":
maxsize, maxprec, maxdec = 1, 0, 0
# Designate the column to be in Scientific notation.
fformat = "E"
else:
# Move to next column value if
# current value is not in Scientific notation
# but the column is designated as such because
# one of the previous values was.
if fformat == "E":
continue
if maxsize < fmt[0]:
maxsize = fmt[0]
if maxent < fmt[1]:
maxent = fmt[1]
if maxdec < fmt[2]:
maxdec = fmt[2]
if fmt[3]:
sign = True
if maxprec < fmt[1] + fmt[2]:
maxprec = fmt[1] + fmt[2]
if fformat == "E":
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = maxsize
if sign:
col.formatted_width += 1
# Number of digits after decimal is replaced by the precision
# for values in Scientific notation, when writing that Format.
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxprec)
col.format = str(col.formatted_width) + "." + str(maxdec) + "e"
else:
lead = ""
if (
getattr(col, "formatted_width", None) is None
): # If ``formats`` not passed.
col.formatted_width = maxent + maxdec + 1
if sign:
col.formatted_width += 1
elif col.format.startswith("0"):
# Keep leading zero, if already set in format - primarily for `seconds` columns
# in coordinates; may need extra case if this is to be also supported with `sign`.
lead = "0"
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxdec)
col.format = lead + col.fortran_format[1:] + "f"
def write_byte_by_byte(self):
"""
Writes the Byte-By-Byte description of the table.
        Columns that are `astropy.coordinates.SkyCoord` or `astropy.timeseries.TimeSeries`
        objects, or columns whose values are such objects, are recognized as such,
        and some predefined labels and descriptions are used for them.
See the Vizier MRT Standard documentation in the link below for more details
on these. An example Byte-By-Byte table is shown here.
See: http://vizier.u-strasbg.fr/doc/catstd-3.1.htx
Example::
--------------------------------------------------------------------------------
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- names Description of names
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
25-31 E7.1 --- s [-9e+34/2.0] Description of s
33-35 I3 --- i [-30/67] Description of i
37-39 F3.1 --- sameF [5.0/5.0] Description of sameF
41-42 I2 --- sameI [20] Description of sameI
44-45 I2 h RAh Right Ascension (hour)
47-48 I2 min RAm Right Ascension (minute)
50-67 F18.15 s RAs Right Ascension (second)
69 A1 --- DE- Sign of Declination
70-71 I2 deg DEd Declination (degree)
73-74 I2 arcmin DEm Declination (arcmin)
76-91 F16.13 arcsec DEs Declination (arcsec)
--------------------------------------------------------------------------------
"""
# Get column widths
vals_list = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max(len(vals[i]) for vals in vals_list)
if self.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
startb = 1 # Byte count starts at 1.
# Set default width of the Bytes count column of the Byte-By-Byte table.
# This ``byte_count_width`` value helps align byte counts with respect
# to the hyphen using a format string.
byte_count_width = len(str(sum(widths) + len(self.cols) - 1))
# Format string for Start Byte and End Byte
singlebfmt = "{:" + str(byte_count_width) + "d}"
fmtb = singlebfmt + "-" + singlebfmt
# Add trailing single whitespaces to Bytes column for better visibility.
singlebfmt += " "
fmtb += " "
# Set default width of Label and Description Byte-By-Byte columns.
max_label_width, max_descrip_size = 7, 16
bbb = Table(
names=["Bytes", "Format", "Units", "Label", "Explanations"], dtype=[str] * 5
)
# Iterate over the columns to write Byte-By-Byte rows.
for i, col in enumerate(self.cols):
# Check if column is MaskedColumn
col.has_null = isinstance(col, MaskedColumn)
if col.format is not None:
col.formatted_width = max(len(sval) for sval in col.str_vals)
# Set MRTColumn type, size and format.
if np.issubdtype(col.dtype, np.integer):
# Integer formatter
self._set_column_val_limits(col)
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = max(len(str(col.max)), len(str(col.min)))
col.fortran_format = "I" + str(col.formatted_width)
if col.format is None:
col.format = ">" + col.fortran_format[1:]
elif np.issubdtype(col.dtype, np.dtype(float).type):
# Float formatter
self._set_column_val_limits(col)
self.column_float_formatter(col)
else:
# String formatter, ``np.issubdtype(col.dtype, str)`` is ``True``.
dtype = col.dtype.str
if col.has_null:
mcol = col
mcol.fill_value = ""
coltmp = Column(mcol.filled(), dtype=str)
dtype = coltmp.dtype.str
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = int(re.search(r"(\d+)$", dtype).group(1))
col.fortran_format = "A" + str(col.formatted_width)
col.format = str(col.formatted_width) + "s"
endb = col.formatted_width + startb - 1
            # ``mixin`` columns converted to string valued columns will not have a name
            # attribute. In those cases, an ``Unknown`` column label is used, indicating
            # that such columns can be better formatted with some manipulation before
            # calling the MRT writer.
if col.name is None:
col.name = "Unknown"
# Set column description.
if col.description is not None:
description = col.description
else:
description = "Description of " + col.name
# Set null flag in column description
nullflag = ""
if col.has_null:
nullflag = "?"
# Set column unit
if col.unit is not None:
col_unit = col.unit.to_string("cds")
elif col.name.lower().find("magnitude") > -1:
# ``col.unit`` can still be ``None``, if the unit of column values
# is ``Magnitude``, because ``astropy.units.Magnitude`` is actually a class.
# Unlike other units which are instances of ``astropy.units.Unit``,
# application of the ``Magnitude`` unit calculates the logarithm
# of the values. Thus, the only way to check for if the column values
# have ``Magnitude`` unit is to check the column name.
col_unit = "mag"
else:
col_unit = "---"
# Add col limit values to col description
lim_vals = ""
if (
col.min
and col.max
and not any(
x in col.name for x in ["RA", "DE", "LON", "LAT", "PLN", "PLT"]
)
):
# No col limit values for coordinate columns.
if col.fortran_format[0] == "I":
if (
abs(col.min) < MAX_COL_INTLIMIT
and abs(col.max) < MAX_COL_INTLIMIT
):
if col.min == col.max:
lim_vals = f"[{col.min}]"
else:
lim_vals = f"[{col.min}/{col.max}]"
elif col.fortran_format[0] in ("E", "F"):
lim_vals = (
f"[{floor(col.min * 100) / 100.}/{ceil(col.max * 100) / 100.}]"
)
if lim_vals != "" or nullflag != "":
description = f"{lim_vals}{nullflag} {description}"
# Find the maximum label and description column widths.
if len(col.name) > max_label_width:
max_label_width = len(col.name)
if len(description) > max_descrip_size:
max_descrip_size = len(description)
# Add a row for the Sign of Declination in the bbb table
if col.name == "DEd":
bbb.add_row(
[
singlebfmt.format(startb),
"A1",
"---",
"DE-",
"Sign of Declination",
]
)
col.fortran_format = "I2"
startb += 1
# Add Byte-By-Byte row to bbb table
bbb.add_row(
[
singlebfmt.format(startb)
if startb == endb
else fmtb.format(startb, endb),
"" if col.fortran_format is None else col.fortran_format,
col_unit,
"" if col.name is None else col.name,
description,
]
)
startb = endb + 2
# Properly format bbb columns
bbblines = StringIO()
bbb.write(
bbblines,
format="ascii.fixed_width_no_header",
delimiter=" ",
bookend=False,
delimiter_pad=None,
formats={
"Format": "<6s",
"Units": "<6s",
"Label": "<" + str(max_label_width) + "s",
"Explanations": "" + str(max_descrip_size) + "s",
},
)
# Get formatted bbb lines
bbblines = bbblines.getvalue().splitlines()
# ``nsplit`` is the number of whitespaces to prefix to long description
# lines in order to wrap them. It is the sum of the widths of the
# previous 4 columns plus the number of single spacing between them.
# The hyphen in the Bytes column is also counted.
nsplit = byte_count_width * 2 + 1 + 12 + max_label_width + 4
# Wrap line if it is too long
buff = ""
for newline in bbblines:
if len(newline) > MAX_SIZE_README_LINE:
buff += ("\n").join(
wrap(
newline,
subsequent_indent=" " * nsplit,
width=MAX_SIZE_README_LINE,
)
)
buff += "\n"
else:
buff += newline + "\n"
# Last value of ``endb`` is the sum of column widths after formatting.
self.linewidth = endb
# Remove the last extra newline character from Byte-By-Byte.
buff = buff[:-1]
return buff
def write(self, lines):
"""
Writes the Header of the MRT table, aka ReadMe, which
also contains the Byte-By-Byte description of the table.
"""
from astropy.coordinates import SkyCoord
# Recognised ``SkyCoord.name`` forms with their default column names (helio* require SunPy).
coord_systems = {
"galactic": ("GLAT", "GLON", "b", "l"),
"ecliptic": ("ELAT", "ELON", "lat", "lon"), # 'geocentric*ecliptic'
"heliographic": ("HLAT", "HLON", "lat", "lon"), # '_carrington|stonyhurst'
"helioprojective": ("HPLT", "HPLN", "Ty", "Tx"),
}
eqtnames = ["RAh", "RAm", "RAs", "DEd", "DEm", "DEs"]
# list to store indices of columns that are modified.
to_pop = []
# For columns that are instances of ``SkyCoord`` and other ``mixin`` columns
# or whose values are objects of these classes.
for i, col in enumerate(self.cols):
# If col is a ``Column`` object but its values are ``SkyCoord`` objects,
# convert the whole column to ``SkyCoord`` object, which helps in applying
# SkyCoord methods directly.
if not isinstance(col, SkyCoord) and isinstance(col[0], SkyCoord):
try:
col = SkyCoord(col)
except (ValueError, TypeError):
# If only the first value of the column is a ``SkyCoord`` object,
# the column cannot be converted to a ``SkyCoord`` object.
# These columns are converted to ``Column`` object and then converted
# to string valued column.
if not isinstance(col, Column):
col = Column(col)
col = Column([str(val) for val in col])
self.cols[i] = col
continue
# Replace single ``SkyCoord`` column by its coordinate components if no coordinate
# columns of the corresponding type exist yet.
if isinstance(col, SkyCoord):
                # If coordinates are given in RA/DEC, divide each of them into hour/deg,
# minute/arcminute, second/arcsecond columns.
if (
"ra" in col.representation_component_names.keys()
and len(set(eqtnames) - set(self.colnames)) == 6
):
ra_c, dec_c = col.ra.hms, col.dec.dms
coords = [
ra_c.h.round().astype("i1"),
ra_c.m.round().astype("i1"),
ra_c.s,
dec_c.d.round().astype("i1"),
dec_c.m.round().astype("i1"),
dec_c.s,
]
coord_units = [u.h, u.min, u.second, u.deg, u.arcmin, u.arcsec]
coord_descrip = [
"Right Ascension (hour)",
"Right Ascension (minute)",
"Right Ascension (second)",
"Declination (degree)",
"Declination (arcmin)",
"Declination (arcsec)",
]
for coord, name, coord_unit, descrip in zip(
coords, eqtnames, coord_units, coord_descrip
):
# Have Sign of Declination only in the DEd column.
if name in ["DEm", "DEs"]:
coord_col = Column(
list(np.abs(coord)),
name=name,
unit=coord_unit,
description=descrip,
)
else:
coord_col = Column(
list(coord),
name=name,
unit=coord_unit,
description=descrip,
)
# Set default number of digits after decimal point for the
# second values, and deg-min to (signed) 2-digit zero-padded integer.
if name == "RAs":
coord_col.format = "013.10f"
elif name == "DEs":
coord_col.format = "012.9f"
elif name == "RAh":
coord_col.format = "2d"
elif name == "DEd":
coord_col.format = "+03d"
elif name.startswith(("RA", "DE")):
coord_col.format = "02d"
self.cols.append(coord_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# For all other coordinate types, simply divide into two columns
                # for latitude and longitude respectively, keeping the unit as it is.
else:
frminfo = ""
for frame, latlon in coord_systems.items():
if (
frame in col.name
and len(set(latlon[:2]) - set(self.colnames)) == 2
):
if frame != col.name:
frminfo = f" ({col.name})"
lon_col = Column(
getattr(col, latlon[3]),
name=latlon[1],
description=f"{frame.capitalize()} Longitude{frminfo}",
unit=col.representation_component_units[latlon[3]],
format=".12f",
)
lat_col = Column(
getattr(col, latlon[2]),
name=latlon[0],
description=f"{frame.capitalize()} Latitude{frminfo}",
unit=col.representation_component_units[latlon[2]],
format="+.12f",
)
self.cols.append(lon_col)
self.cols.append(lat_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
                # Convert all other ``SkyCoord`` columns that are not in the above
                # recognised representations to string valued columns. Those could either
                # be types not supported yet, or coordinates already present and converted.
# If there were any extra ``SkyCoord`` columns of one kind after the first one,
# then their decomposition into their component columns has been skipped.
# This is done in order to not create duplicate component columns.
# Explicit renaming of the extra coordinate component columns by appending some
# suffix to their name, so as to distinguish them, is not yet implemented.
if i not in to_pop:
warnings.warn(
f"Coordinate system of type '{col.name}' already stored in"
" table as CDS/MRT-syle columns or of unrecognized type. So"
f" column {i} is being skipped with designation of a string"
f" valued column `{self.colnames[i]}`.",
UserWarning,
)
self.cols.append(Column(col.to_string(), name=self.colnames[i]))
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``mixin`` columns to ``Column`` objects.
# Parsing these may still lead to errors!
elif not isinstance(col, Column):
col = Column(col)
# If column values are ``object`` types, convert them to string.
if np.issubdtype(col.dtype, np.dtype(object).type):
col = Column([str(val) for val in col])
self.cols[i] = col
# Delete original ``SkyCoord`` columns, if there were any.
for i in to_pop[::-1]:
self.cols.pop(i)
# Check for any left over extra coordinate columns.
if any(x in self.colnames for x in ["RAh", "DEd", "ELON", "GLAT"]):
# At this point any extra ``SkyCoord`` columns should have been converted to string
# valued columns, together with issuance of a warning, by the coordinate parser above.
# This test is just left here as a safeguard.
for i, col in enumerate(self.cols):
if isinstance(col, SkyCoord):
self.cols[i] = Column(col.to_string(), name=self.colnames[i])
message = (
"Table already has coordinate system in CDS/MRT-syle columns. "
f"So column {i} should have been replaced already with "
f"a string valued column `{self.colnames[i]}`."
)
raise core.InconsistentTableError(message)
# Get Byte-By-Byte description and fill the template
bbb_template = Template("\n".join(BYTE_BY_BYTE_TEMPLATE))
byte_by_byte = bbb_template.substitute(
{"file": "table.dat", "bytebybyte": self.write_byte_by_byte()}
)
# Fill up the full ReadMe
rm_template = Template("\n".join(MRT_TEMPLATE))
readme_filled = rm_template.substitute({"bytebybyte": byte_by_byte})
lines.append(readme_filled)
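# A minimal usage sketch (illustrative only, not executed here): a table holding a
# ``SkyCoord`` column goes through the coordinate handling above, so the single
# column is written out as the RAh/RAm/RAs/DEd/DEm/DEs component columns::
#
#     import sys
#     from astropy.coordinates import SkyCoord
#     from astropy.table import Table
#     from astropy.io import ascii
#
#     coord = SkyCoord(ra=[10.68, 83.82], dec=[41.27, -5.39], unit="deg")
#     tbl = Table({"name": ["M31", "M42"], "coord": coord})
#     ascii.write(tbl, sys.stdout, format="mrt")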
class MrtData(cds.CdsData):
"""MRT table data reader."""
_subfmt = "MRT"
splitter_class = MrtSplitter
def write(self, lines):
self.splitter.delimiter = " "
fixedwidth.FixedWidthData.write(self, lines)
class Mrt(core.BaseReader):
"""AAS MRT (Machine-Readable Table) format table.
**Reading**
::
>>> from astropy.io import ascii
>>> table = ascii.read('data.mrt', format='mrt')
**Writing**
Use ``ascii.write(table, 'data.mrt', format='mrt')`` to write tables to
Machine Readable Table (MRT) format.
Note that the metadata of the table, apart from units, column names and
description, will not be written. These have to be filled in by hand later.
See also: :ref:`cds_mrt_format`.
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = "mrt"
_io_registry_format_aliases = ["mrt"]
_io_registry_can_write = True
_description = "MRT format table"
data_class = MrtData
header_class = MrtHeader
def write(self, table=None):
# Construct for writing empty table is not yet done.
if len(table) == 0:
raise NotImplementedError
self.data.header = self.header
self.header.position_line = None
self.header.start_line = None
        # Create a copy of the ``table``, so that the copy gets modified and
# written to the file, while the original table remains as it is.
table = table.copy()
return super().write(table)
|
073ee8f0573ea5eccd176c703eb383217f4536b8c7f91d6977100ef5757e85ba | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
latex.py:
Classes to read and write LaTeX tables
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
latexdicts = {
"AA": {
"tabletype": "table",
"header_start": r"\hline \hline",
"header_end": r"\hline",
"data_end": r"\hline",
},
"doublelines": {
"tabletype": "table",
"header_start": r"\hline \hline",
"header_end": r"\hline\hline",
"data_end": r"\hline\hline",
},
"template": {
"tabletype": "tabletype",
"caption": "caption",
"tablealign": "tablealign",
"col_align": "col_align",
"preamble": "preamble",
"header_start": "header_start",
"header_end": "header_end",
"data_start": "data_start",
"data_end": "data_end",
"tablefoot": "tablefoot",
"units": {"col1": "unit of col1", "col2": "unit of col2"},
},
}
RE_COMMENT = re.compile(r"(?<!\\)%") # % character but not \%
def add_dictval_to_list(adict, key, alist):
"""
Add a value from a dictionary to a list.
Parameters
----------
adict : dictionary
key : hashable
alist : list
List where value should be added
"""
if key in adict:
if isinstance(adict[key], str):
alist.append(adict[key])
else:
alist.extend(adict[key])
def find_latex_line(lines, latex):
"""
Find the first line which matches a pattern.
Parameters
----------
lines : list
List of strings
latex : str
Search pattern
Returns
-------
line_num : int, None
Line number. Returns None, if no match was found
"""
re_string = re.compile(latex.replace("\\", "\\\\"))
for i, line in enumerate(lines):
if re_string.match(line):
return i
else:
return None
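# Illustrative example (doctest-style, shown here for reference only)::
#
#     >>> find_latex_line([r"\documentclass{article}", r"\begin{tabular}{cc}"],
#     ...                 r"\begin{tabular}")
#     1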
class LatexInputter(core.BaseInputter):
def process_lines(self, lines):
return [lin.strip() for lin in lines]
class LatexSplitter(core.BaseSplitter):
"""Split LaTeX table data. Default delimiter is `&`."""
delimiter = "&"
def __call__(self, lines):
last_line = RE_COMMENT.split(lines[-1])[0].strip()
if not last_line.endswith(r"\\"):
lines[-1] = last_line + r"\\"
return super().__call__(lines)
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. Also remove
\\ at end of line.
"""
line = RE_COMMENT.split(line)[0]
line = line.strip()
if line.endswith(r"\\"):
line = line.rstrip(r"\\")
else:
raise core.InconsistentTableError(
r"Lines in LaTeX table have to end with \\"
)
return line
def process_val(self, val):
"""Remove whitespace and {} at the beginning or end of value."""
val = val.strip()
if val and (val[0] == "{") and (val[-1] == "}"):
val = val[1:-1]
return val
def join(self, vals):
"""Join values together and add a few extra spaces for readability."""
delimiter = " " + self.delimiter + " "
return delimiter.join(x.strip() for x in vals) + r" \\"
class LatexHeader(core.BaseHeader):
"""Class to read the header of Latex Tables."""
header_start = r"\begin{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
line = find_latex_line(lines, self.header_start)
if line is not None:
return line + 1
else:
return None
def _get_units(self):
units = {}
col_units = [col.info.unit for col in self.cols]
for name, unit in zip(self.colnames, col_units):
if unit:
try:
units[name] = unit.to_string(format="latex_inline")
except AttributeError:
units[name] = unit
return units
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
if self.latex["tabletype"] is not None:
lines.append(r"\begin{" + self.latex["tabletype"] + r"}" + align)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\caption{" + self.latex["caption"] + "}")
lines.append(self.header_start + r"{" + self.latex["col_align"] + r"}")
add_dictval_to_list(self.latex, "header_start", lines)
lines.append(self.splitter.join(self.colnames))
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
lines.append(
self.splitter.join([units.get(name, " ") for name in self.colnames])
)
add_dictval_to_list(self.latex, "header_end", lines)
class LatexData(core.BaseData):
"""Class to read the data in LaTeX tables."""
data_start = None
data_end = r"\end{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
if self.data_start:
return find_latex_line(lines, self.data_start)
else:
start = self.header.start_line(lines)
if start is None:
raise core.InconsistentTableError(r"Could not find table start")
return start + 1
def end_line(self, lines):
if self.data_end:
return find_latex_line(lines, self.data_end)
else:
return None
def write(self, lines):
add_dictval_to_list(self.latex, "data_start", lines)
core.BaseData.write(self, lines)
add_dictval_to_list(self.latex, "data_end", lines)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
if self.latex["tabletype"] is not None:
lines.append(r"\end{" + self.latex["tabletype"] + "}")
class Latex(core.BaseReader):
r"""LaTeX format table.
This class implements some LaTeX specific commands. Its main
purpose is to write out a table in a form that LaTeX can compile. It
is beyond the scope of this class to implement every possible LaTeX
    command; instead the focus is to generate syntactically valid
LaTeX tables.
This class can also read simple LaTeX tables (one line per table
row, no ``\multicolumn`` or similar constructs), specifically, it
can read the tables that it writes.
Reading a LaTeX table, the following keywords are accepted:
**ignore_latex_commands** :
Lines starting with these LaTeX commands will be treated as comments (i.e. ignored).
    When writing a LaTeX table, some keywords can be used to customize the
format. Care has to be taken here, because python interprets ``\\``
in a string as an escape character. In order to pass this to the
output either format your strings as raw strings with the ``r``
specifier or use a double ``\\\\``.
Examples::
caption = r'My table \label{mytable}'
caption = 'My table \\\\label{mytable}'
**latexdict** : Dictionary of extra parameters for the LaTeX output
* tabletype : used for first and last line of table.
The default is ``\\begin{table}``. The following would generate a table,
which spans the whole page in a two-column document::
ascii.write(data, sys.stdout, Writer = ascii.Latex,
latexdict = {'tabletype': 'table*'})
If ``None``, the table environment will be dropped, keeping only
the ``tabular`` environment.
* tablealign : positioning of table in text.
The default is not to specify a position preference in the text.
      If, e.g., the alignment is ``ht``, then the LaTeX output will be ``\\begin{table}[ht]``.
* col_align : Alignment of columns
If not present all columns will be centered.
* caption : Table caption (string or list of strings)
This will appear above the table as it is the standard in
many scientific publications. If you prefer a caption below
the table, just write the full LaTeX command as
``latexdict['tablefoot'] = r'\caption{My table}'``
* preamble, header_start, header_end, data_start, data_end, tablefoot: Pure LaTeX
Each one can be a string or a list of strings. These strings
will be inserted into the table without any further
processing. See the examples below.
* units : dictionary of strings
Keys in this dictionary should be names of columns. If
present, a line in the LaTeX table directly below the column
names is added, which contains the values of the
dictionary. Example::
from astropy.io import ascii
data = {'name': ['bike', 'car'], 'mass': [75,1200], 'speed': [10, 130]}
ascii.write(data, Writer=ascii.Latex,
latexdict = {'units': {'mass': 'kg', 'speed': 'km/h'}})
If the column has no entry in the ``units`` dictionary, it defaults
to the **unit** attribute of the column. If this attribute is not
specified (i.e. it is None), the unit will be written as ``' '``.
Run the following code to see where each element of the
dictionary is inserted in the LaTeX table::
from astropy.io import ascii
data = {'cola': [1,2], 'colb': [3,4]}
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['template'])
Some table styles are predefined in the dictionary
    ``ascii.latex.latexdicts``. The following generates a table in the
style preferred by A&A and some other journals::
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['AA'])
As an example, this generates a table, which spans all columns
and is centered on the page::
ascii.write(data, Writer=ascii.Latex, col_align='|lr|',
latexdict={'preamble': r'\begin{center}',
'tablefoot': r'\end{center}',
'tabletype': 'table*'})
**caption** : Set table caption
Shorthand for::
latexdict['caption'] = caption
**col_align** : Set the column alignment.
If not present this will be auto-generated for centered
columns. Shorthand for::
latexdict['col_align'] = col_align
"""
_format_name = "latex"
_io_registry_format_aliases = ["latex"]
_io_registry_suffix = ".tex"
_description = "LaTeX table"
header_class = LatexHeader
data_class = LatexData
inputter_class = LatexInputter
# Strictly speaking latex only supports 1-d columns so this should inherit
# the base max_ndim = 1. But as reported in #11695 this causes a strange
# problem with Jupyter notebook, which displays a table by first calling
# _repr_latex_. For a multidimensional table this issues a stack traceback
# before moving on to _repr_html_. Here we prioritize fixing the issue with
# Jupyter displaying a Table with multidimensional columns.
max_ndim = None
def __init__(
self,
ignore_latex_commands=[
"hline",
"vspace",
"tableline",
"toprule",
"midrule",
"bottomrule",
],
latexdict={},
caption="",
col_align=None,
):
super().__init__()
self.latex = {}
# The latex dict drives the format of the table and needs to be shared
# with data and header
self.header.latex = self.latex
self.data.latex = self.latex
self.latex["tabletype"] = "table"
self.latex.update(latexdict)
if caption:
self.latex["caption"] = caption
if col_align:
self.latex["col_align"] = col_align
self.ignore_latex_commands = ignore_latex_commands
self.header.comment = "%|" + "|".join(
[r"\\" + command for command in self.ignore_latex_commands]
)
self.data.comment = self.header.comment
def write(self, table=None):
self.header.start_line = None
self.data.start_line = None
return core.BaseReader.write(self, table=table)
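# A rough end-to-end sketch (illustrative, not executed here): with the predefined
# 'AA' style from ``latexdicts`` the writer above produces output along these
# lines (exact spacing may differ)::
#
#     import sys
#     from astropy.io import ascii
#
#     data = {'cola': [1, 2], 'colb': [3, 4]}
#     ascii.write(data, sys.stdout, format='latex',
#                 latexdict=ascii.latex.latexdicts['AA'])
#     # \begin{table}
#     # \begin{tabular}{cc}
#     # \hline \hline
#     # cola & colb \\
#     # \hline
#     # 1 & 3 \\
#     # 2 & 4 \\
#     # \hline
#     # \end{tabular}
#     # \end{table}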
class AASTexHeaderSplitter(LatexSplitter):
r"""Extract column names from a `deluxetable`_.
This splitter expects the following LaTeX code **in a single line**:
\tablehead{\colhead{col1} & ... & \colhead{coln}}
"""
def __call__(self, lines):
return super(LatexSplitter, self).__call__(lines)
def process_line(self, line):
"""extract column names from tablehead."""
line = line.split("%")[0]
line = line.replace(r"\tablehead", "")
line = line.strip()
if (line[0] == "{") and (line[-1] == "}"):
line = line[1:-1]
else:
raise core.InconsistentTableError(r"\tablehead is missing {}")
return line.replace(r"\colhead", "")
def join(self, vals):
return " & ".join([r"\colhead{" + str(x) + "}" for x in vals])
class AASTexHeader(LatexHeader):
r"""In a `deluxetable
<http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header
keywords differ from standard LaTeX.
This header is modified to take that into account.
"""
header_start = r"\tablehead"
splitter_class = AASTexHeaderSplitter
def start_line(self, lines):
return find_latex_line(lines, r"\tablehead")
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
lines.append(
r"\begin{"
+ self.latex["tabletype"]
+ r"}{"
+ self.latex["col_align"]
+ r"}"
+ align
)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\tablecaption{" + self.latex["caption"] + "}")
tablehead = " & ".join([r"\colhead{" + name + "}" for name in self.colnames])
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
tablehead += r"\\ " + self.splitter.join(
[units.get(name, " ") for name in self.colnames]
)
lines.append(r"\tablehead{" + tablehead + "}")
class AASTexData(LatexData):
r"""In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata`."""
data_start = r"\startdata"
data_end = r"\enddata"
def start_line(self, lines):
return find_latex_line(lines, self.data_start) + 1
def write(self, lines):
lines.append(self.data_start)
lines_length_initial = len(lines)
core.BaseData.write(self, lines)
        # Remove the trailing space(s) and \\ appended by the data writer, which
        # would otherwise create an extra empty row at the end.
if len(lines) > lines_length_initial:
lines[-1] = re.sub(r"\s* \\ \\ \s* $", "", lines[-1], flags=re.VERBOSE)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
lines.append(r"\end{" + self.latex["tabletype"] + r"}")
class AASTex(Latex):
"""AASTeX format table.
This class implements some AASTeX specific commands.
AASTeX is used for the AAS (American Astronomical Society)
publications like ApJ, ApJL and AJ.
It derives from the ``Latex`` reader and accepts the same
keywords. However, the keywords ``header_start``, ``header_end``,
``data_start`` and ``data_end`` in ``latexdict`` have no effect.
"""
_format_name = "aastex"
_io_registry_format_aliases = ["aastex"]
_io_registry_suffix = "" # AASTex inherits from Latex, so override this class attr
_description = "AASTeX deluxetable used for AAS journals"
header_class = AASTexHeader
data_class = AASTexData
def __init__(self, **kwargs):
super().__init__(**kwargs)
# check if tabletype was explicitly set by the user
if not (("latexdict" in kwargs) and ("tabletype" in kwargs["latexdict"])):
self.latex["tabletype"] = "deluxetable"
|
2c6ea5ffd18980c2255e010f4501fe77cfe066178cf2e68658bc2ca9d2a912d2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ipac.py:
Classes to read IPAC table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from collections import OrderedDict, defaultdict
from textwrap import wrap
from warnings import warn
from astropy.table.pprint import get_auto_format_func
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core, fixedwidth
class IpacFormatErrorDBMS(Exception):
def __str__(self):
return "{}\nSee {}".format(
super().__str__(),
"https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html",
)
class IpacFormatError(Exception):
def __str__(self):
return "{}\nSee {}".format(
super().__str__(),
"https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html",
)
class IpacHeaderSplitter(core.BaseSplitter):
"""Splitter for Ipac Headers.
    This splitter is similar to its parent when reading, but supports a
fixed width format (as required for Ipac table headers) for writing.
"""
process_line = None
process_val = None
delimiter = "|"
delimiter_pad = ""
skipinitialspace = False
comment = r"\s*\\"
write_comment = r"\\"
col_starts = None
col_ends = None
def join(self, vals, widths):
pad = self.delimiter_pad or ""
delimiter = self.delimiter or ""
padded_delim = pad + delimiter + pad
bookend_left = delimiter + pad
bookend_right = pad + delimiter
vals = [" " * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
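    # Illustrative: joining ["ra", "dec"] with widths [5, 6] yields
    # "|   ra|   dec|" -- every value is right-justified to its width and the
    # row is bookended with the "|" delimiter.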
class IpacHeader(fixedwidth.FixedWidthHeader):
"""IPAC table header."""
splitter_class = IpacHeaderSplitter
# Defined ordered list of possible types. Ordering is needed to
# distinguish between "d" (double) and "da" (date) as defined by
# the IPAC standard for abbreviations. This gets used in get_col_type().
col_type_list = (
("integer", core.IntType),
("long", core.IntType),
("double", core.FloatType),
("float", core.FloatType),
("real", core.FloatType),
("char", core.StrType),
("date", core.StrType),
)
definition = "ignore"
start_line = None
def process_lines(self, lines):
"""Generator to yield IPAC header lines, i.e. those starting and ending with
delimiter character (with trailing whitespace stripped).
"""
delim = self.splitter.delimiter
for line in lines:
line = line.rstrip()
if line.startswith(delim) and line.endswith(delim):
yield line.strip(delim)
def update_meta(self, lines, meta):
"""
Extract table-level comments and keywords for IPAC table. See:
https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html#kw.
"""
def process_keyword_value(val):
"""
Take a string value and convert to float, int or str, and strip quotes
as needed.
"""
val = val.strip()
try:
val = int(val)
except Exception:
try:
val = float(val)
except Exception:
# Strip leading/trailing quote. The spec says that a matched pair
# of quotes is required, but this code will allow a non-quoted value.
for quote in ('"', "'"):
if val.startswith(quote) and val.endswith(quote):
val = val[1:-1]
break
return val
table_meta = meta["table"]
table_meta["comments"] = []
table_meta["keywords"] = OrderedDict()
keywords = table_meta["keywords"]
# fmt: off
re_keyword = re.compile(
r'\\'
r'(?P<name> \w+)'
r'\s* = (?P<value> .+) $',
re.VERBOSE
)
# fmt: on
for line in lines:
# Keywords and comments start with "\". Once the first non-slash
# line is seen then bail out.
if not line.startswith("\\"):
break
m = re_keyword.match(line)
if m:
name = m.group("name")
val = process_keyword_value(m.group("value"))
# IPAC allows for continuation keywords, e.g.
# \SQL = 'WHERE '
# \SQL = 'SELECT (25 column names follow in next row.)'
if name in keywords and isinstance(val, str):
prev_val = keywords[name]["value"]
if isinstance(prev_val, str):
val = prev_val + val
keywords[name] = {"value": val}
else:
# Comment is required to start with "\ "
if line.startswith("\\ "):
val = line[2:].strip()
if val:
table_meta["comments"].append(val)
def get_col_type(self, col):
for col_type_key, col_type in self.col_type_list:
if col_type_key.startswith(col.raw_type.lower()):
return col_type
else:
raise ValueError(
                f'Unknown data type "{col.raw_type}" for column "{col.name}"'
)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
# generator returning valid header lines
header_lines = self.process_lines(lines)
header_vals = [vals for vals in self.splitter(header_lines)]
if len(header_vals) == 0:
raise ValueError(
"At least one header line beginning and ending with delimiter required"
)
elif len(header_vals) > 4:
raise ValueError("More than four header lines were found")
# Generate column definitions
cols = []
start = 1
for i, name in enumerate(header_vals[0]):
col = core.Column(name=name.strip(" -"))
col.start = start
col.end = start + len(name)
if len(header_vals) > 1:
col.raw_type = header_vals[1][i].strip(" -")
col.type = self.get_col_type(col)
if len(header_vals) > 2:
col.unit = header_vals[2][i].strip() or None # Can't strip dashes here
if len(header_vals) > 3:
# The IPAC null value corresponds to the io.ascii bad_value.
# In this case there isn't a fill_value defined, so just put
# in the minimal entry that is sure to convert properly to the
# required type.
#
# Strip spaces but not dashes (not allowed in NULL row per
# https://github.com/astropy/astropy/issues/361)
null = header_vals[3][i].strip()
fillval = "" if issubclass(col.type, core.StrType) else "0"
self.data.fill_values.append((null, fillval, col.name))
start = col.end + 1
cols.append(col)
# Correct column start/end based on definition
if self.ipac_definition == "right":
col.start -= 1
elif self.ipac_definition == "left":
col.end += 1
self.names = [x.name for x in cols]
self.cols = cols
def str_vals(self):
if self.DBMS:
IpacFormatE = IpacFormatErrorDBMS
else:
IpacFormatE = IpacFormatError
namelist = self.colnames
if self.DBMS:
countnamelist = defaultdict(int)
for name in self.colnames:
countnamelist[name.lower()] += 1
doublenames = [x for x in countnamelist if countnamelist[x] > 1]
if doublenames != []:
raise IpacFormatE(
"IPAC DBMS tables are not case sensitive. "
f"This causes duplicate column names: {doublenames}"
)
for name in namelist:
m = re.match(r"\w+", name)
if m.end() != len(name):
raise IpacFormatE(
f"{name} - Only alphanumeric characters and _ "
"are allowed in column names."
)
if self.DBMS and not (name[0].isalpha() or (name[0] == "_")):
raise IpacFormatE(f"Column name cannot start with numbers: {name}")
if self.DBMS:
if name in ["x", "y", "z", "X", "Y", "Z"]:
raise IpacFormatE(
f"{name} - x, y, z, X, Y, Z are reserved names and "
"cannot be used as column names."
)
if len(name) > 16:
raise IpacFormatE(
f"{name} - Maximum length for column name is 16 characters"
)
else:
if len(name) > 40:
raise IpacFormatE(
f"{name} - Maximum length for column name is 40 characters."
)
dtypelist = []
unitlist = []
nullist = []
for col in self.cols:
col_dtype = col.info.dtype
col_unit = col.info.unit
col_format = col.info.format
if col_dtype.kind in ["i", "u"]:
if col_dtype.itemsize <= 2:
dtypelist.append("int")
else:
dtypelist.append("long")
elif col_dtype.kind == "f":
if col_dtype.itemsize <= 4:
dtypelist.append("float")
else:
dtypelist.append("double")
else:
dtypelist.append("char")
if col_unit is None:
unitlist.append("")
else:
unitlist.append(str(col.info.unit))
# This may be incompatible with mixin columns
null = col.fill_values[core.masked]
try:
auto_format_func = get_auto_format_func(col)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
nullist.append((format_func(col_format, null)).strip())
except Exception:
# It is possible that null and the column values have different
                # data types (e.g. the values are numbers while null = 'null' is a string).
# This could cause all kinds of exceptions, so a catch all
# block is needed here
nullist.append(str(null).strip())
return [namelist, dtypelist, unitlist, nullist]
def write(self, lines, widths):
"""Write header.
The width of each column is determined in Ipac.write. Writing the header
must be delayed until that time.
This function is called from there, once the width information is
available.
"""
for vals in self.str_vals():
lines.append(self.splitter.join(vals, widths))
return lines
class IpacDataSplitter(fixedwidth.FixedWidthSplitter):
delimiter = " "
delimiter_pad = ""
bookend = True
class IpacData(fixedwidth.FixedWidthData):
"""IPAC table data reader."""
comment = r"[|\\]"
start_line = 0
splitter_class = IpacDataSplitter
fill_values = [(core.masked, "null")]
def write(self, lines, widths, vals_list):
"""IPAC writer, modified from FixedWidth writer."""
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class Ipac(basic.Basic):
r"""IPAC format table.
See: https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html
Example::
\\name=value
\\ Comment
| column1 | column2 | column3 | column4 | column5 |
| double | double | int | double | char |
| unit | unit | unit | unit | unit |
| null | null | null | null | null |
2.0978 29.09056 73765 2.06000 B8IVpMnHg
Or::
|-----ra---|----dec---|---sao---|------v---|----sptype--------|
2.09708 29.09056 73765 2.06000 B8IVpMnHg
The comments and keywords defined in the header are available via the output
table ``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/ipac.dat')
>>> data = ascii.read(filename)
>>> print(data.meta['comments'])
['This is an example of a valid comment']
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'])
...
intval 1
floatval 2300.0
date Wed Sp 20 09:48:36 1995
key_continue IPAC keywords can continue across lines
Note that there are different conventions for characters occurring below the
position of the ``|`` symbol in IPAC tables. By default, any character
below a ``|`` will be ignored (since this is the current standard),
but if you need to read files that assume characters below the ``|``
symbols belong to the column before or after the ``|``, you can specify
``definition='left'`` or ``definition='right'`` respectively when reading
the table (the default is ``definition='ignore'``). The following examples
demonstrate the different conventions:
* ``definition='ignore'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='left'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='right'``::
| ra | dec |
| float | float |
1.2345 6.7890
IPAC tables can specify a null value in the header that is shown in place
of missing or bad data. On writing, this value defaults to ``null``.
To specify a different null value, use the ``fill_values`` option to
replace masked values with a string or number of your choice as
described in :ref:`astropy:io_ascii_write_parameters`::
>>> from astropy.io.ascii import masked
>>> fill = [(masked, 'N/A', 'ra'), (masked, -999, 'sptype')]
>>> ascii.write(data, format='ipac', fill_values=fill)
\ This is an example of a valid comment
...
| ra| dec| sai| v2| sptype|
| double| double| long| double| char|
| unit| unit| unit| unit| ergs|
| N/A| null| null| null| -999|
N/A 29.09056 null 2.06 -999
2345678901.0 3456789012.0 456789012 4567890123.0 567890123456789012
When writing a table with a column of integers, the data type is output
as ``int`` when the column ``dtype.itemsize`` is less than or equal to 2;
otherwise the data type is ``long``. For a column of floating-point values,
the data type is ``float`` when ``dtype.itemsize`` is less than or equal
to 4; otherwise the data type is ``double``.
Parameters
----------
definition : str, optional
Specify the convention for characters in the data table that occur
directly below the pipe (``|``) symbol in the header column definition:
* 'ignore' - Any character beneath a pipe symbol is ignored (default)
* 'right' - Character is associated with the column to the right
* 'left' - Character is associated with the column to the left
DBMS : bool, optional
If true, this verifies that written tables adhere (semantically)
to the `IPAC/DBMS
<https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html>`_
        definition of IPAC tables. If false, it only checks for the (less strict)
`IPAC <https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html>`_
definition.
"""
_format_name = "ipac"
_io_registry_format_aliases = ["ipac"]
_io_registry_can_write = True
_description = "IPAC format table"
data_class = IpacData
header_class = IpacHeader
def __init__(self, definition="ignore", DBMS=False):
super().__init__()
        # Usually the header is not defined in __init__, but here it needs a keyword
if definition in ["ignore", "left", "right"]:
self.header.ipac_definition = definition
else:
raise ValueError("definition should be one of ignore/left/right")
self.header.DBMS = DBMS
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Set a default null value for all columns by adding at the end, which
# is the position with the lowest priority.
# We have to do it this late, because the fill_value
# defined in the class can be overwritten by ui.write
self.data.fill_values.append((core.masked, "null"))
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, self.guessing)
core._apply_include_exclude_names(
table, self.names, self.include_names, self.exclude_names
)
# Check that table has only 1-d columns.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
# Write header and data to lines list
lines = []
# Write meta information
if "comments" in table.meta:
for comment in table.meta["comments"]:
if len(str(comment)) > 78:
warn(
"Comment string > 78 characters was automatically wrapped.",
AstropyUserWarning,
)
for line in wrap(
str(comment), 80, initial_indent="\\ ", subsequent_indent="\\ "
):
lines.append(line)
if "keywords" in table.meta:
keydict = table.meta["keywords"]
for keyword in keydict:
try:
val = keydict[keyword]["value"]
lines.append(f"\\{keyword.strip()}={val!r}")
# meta is not standardized: Catch some common Errors.
except TypeError:
warn(
f"Table metadata keyword {keyword} has been skipped. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}",
AstropyUserWarning,
)
ignored_keys = [
key for key in table.meta if key not in ("keywords", "comments")
]
if any(ignored_keys):
warn(
f"Table metadata keyword(s) {ignored_keys} were not written. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}",
AstropyUserWarning,
)
# Usually, this is done in data.write, but since the header is written
# first, we need that here.
self.data._set_fill_values(self.data.cols)
# get header and data as strings to find width of each column
for i, col in enumerate(table.columns.values()):
col.headwidth = max(len(vals[i]) for vals in self.header.str_vals())
# keep data_str_vals because they take some time to make
data_str_vals = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
data_str_vals.append(vals)
for i, col in enumerate(table.columns.values()):
# FIXME: In Python 3.4, use max([], default=0).
# See: https://docs.python.org/3/library/functions.html#max
if data_str_vals:
col.width = max(len(vals[i]) for vals in data_str_vals)
else:
col.width = 0
widths = [max(col.width, col.headwidth) for col in table.columns.values()]
# then write table
self.header.write(lines, widths)
self.data.write(lines, widths, data_str_vals)
return lines
|
5c8027c4a4005583488601781ba96a88b293a5f7693685bed3be8bff2eff749e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible HTML table reader and writer.
html.py:
Classes to read and write HTML tables
`BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
must be installed to read HTML tables.
"""
import warnings
from copy import deepcopy
from astropy.table import Column
from astropy.utils.xml import writer
from . import core
class SoupString(str):
"""
Allows for strings to hold BeautifulSoup data.
"""
def __new__(cls, *args, **kwargs):
return str.__new__(cls, *args, **kwargs)
def __init__(self, val):
self.soup = val
class ListWriter:
"""
Allows for XMLWriter to write to a list instead of a file.
"""
def __init__(self, out):
self.out = out
def write(self, data):
self.out.append(data)
def identify_table(soup, htmldict, numtable):
"""
Checks whether the given BeautifulSoup tag is the table
the user intends to process.
"""
if soup is None or soup.name != "table":
return False # Tag is not a <table>
elif "table_id" not in htmldict:
return numtable == 1
table_id = htmldict["table_id"]
if isinstance(table_id, str):
return "id" in soup.attrs and soup["id"] == table_id
elif isinstance(table_id, int):
return table_id == numtable
# Return False if an invalid parameter is given
return False
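# Illustrative behaviour: with htmldict={"table_id": "main"} only a
# <table id="main"> element matches; with htmldict={"table_id": 2} the second
# <table> in the document matches; without "table_id" the first table is used.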
class HTMLInputter(core.BaseInputter):
"""
Input lines of HTML in a valid form.
This requires `BeautifulSoup
<http://www.crummy.com/software/BeautifulSoup/>`_ to be installed.
"""
def process_lines(self, lines):
"""
Convert the given input into a list of SoupString rows
for further processing.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise core.OptionalTableImportError(
"BeautifulSoup must be installed to read HTML tables"
)
if "parser" not in self.html:
with warnings.catch_warnings():
# Ignore bs4 parser warning #4550.
warnings.filterwarnings(
"ignore", ".*no parser was explicitly specified.*"
)
soup = BeautifulSoup("\n".join(lines))
else: # use a custom backend parser
soup = BeautifulSoup("\n".join(lines), self.html["parser"])
tables = soup.find_all("table")
for i, possible_table in enumerate(tables):
if identify_table(possible_table, self.html, i + 1):
table = possible_table # Find the correct table
break
else:
if isinstance(self.html["table_id"], int):
err_descr = f"number {self.html['table_id']}"
else:
err_descr = f"id '{self.html['table_id']}'"
raise core.InconsistentTableError(
f"ERROR: HTML table {err_descr} not found"
)
# Get all table rows
soup_list = [SoupString(x) for x in table.find_all("tr")]
return soup_list
class HTMLSplitter(core.BaseSplitter):
"""
Split HTML table data.
"""
def __call__(self, lines):
"""
Return HTML data from lines as a generator.
"""
for line in lines:
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
header_elements = soup.find_all("th")
if header_elements:
# Return multicolumns as tuples for HTMLHeader handling
yield [
(el.text.strip(), el["colspan"])
if el.has_attr("colspan")
else el.text.strip()
for el in header_elements
]
data_elements = soup.find_all("td")
if data_elements:
yield [el.text.strip() for el in data_elements]
if len(lines) == 0:
raise core.InconsistentTableError(
"HTML tables must contain data in a <table> tag"
)
class HTMLOutputter(core.TableOutputter):
"""
Output the HTML data as an ``astropy.table.Table`` object.
This subclass allows for the final table to contain
multidimensional columns (defined using the colspan attribute
of <th>).
"""
default_converters = [
core.convert_numpy(int),
core.convert_numpy(float),
core.convert_numpy(str),
]
def __call__(self, cols, meta):
"""
Process the data in multidimensional columns.
"""
new_cols = []
col_num = 0
while col_num < len(cols):
col = cols[col_num]
if hasattr(col, "colspan"):
# Join elements of spanned columns together into list of tuples
span_cols = cols[col_num : col_num + col.colspan]
new_col = core.Column(col.name)
new_col.str_vals = list(zip(*[x.str_vals for x in span_cols]))
new_cols.append(new_col)
col_num += col.colspan
else:
new_cols.append(col)
col_num += 1
return super().__call__(new_cols, meta)
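# Illustrative: a header cell such as <th colspan="2">coord</th> produces a single
# column named "coord" whose string values are 2-tuples, which the converters
# above turn into a two-dimensional column of the final Table.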
class HTMLHeader(core.BaseHeader):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which header data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
if soup.th is not None:
return i
return None
def _set_cols_from_names(self):
"""
Set columns from header names, handling multicolumns appropriately.
"""
self.cols = []
new_names = []
for name in self.names:
if isinstance(name, tuple):
col = core.Column(name=name[0])
col.colspan = int(name[1])
self.cols.append(col)
new_names.append(name[0])
for i in range(1, int(name[1])):
# Add dummy columns
self.cols.append(core.Column(""))
new_names.append("")
else:
self.cols.append(core.Column(name=name))
new_names.append(name)
self.names = new_names
class HTMLData(core.BaseData):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which table data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
if soup.td is not None:
if soup.th is not None:
raise core.InconsistentTableError(
"HTML tables cannot have headings and data in the same row"
)
return i
raise core.InconsistentTableError("No start line found for HTML data")
def end_line(self, lines):
"""
Return the line number at which table data ends.
"""
last_index = -1
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError("HTML lines should be of type SoupString")
soup = line.soup
if soup.td is not None:
last_index = i
if last_index == -1:
return None
return last_index + 1
class HTML(core.BaseReader):
"""HTML format table.
In order to customize input and output, a dict of parameters may
be passed to this class holding specific customizations.
**htmldict** : Dictionary of parameters for HTML input/output.
* css : Customized styling
If present, this parameter will be included in a <style>
tag and will define stylistic attributes of the output.
* table_id : ID for the input table
If a string, this defines the HTML id of the table to be processed.
If an integer, this specifies the index of the input table in the
available tables. Unless this parameter is given, the reader will
use the first table found in the input file.
* multicol : Use multi-dimensional columns for output
The writer will output tuples as elements of multi-dimensional
columns if this parameter is true, and if not then it will
use the syntax 1.36583e-13 .. 1.36583e-13 for output. If not
present, this parameter will be true by default.
* raw_html_cols : column name or list of names with raw HTML content
This allows one to include raw HTML content in the column output,
for instance to include link references in a table. This option
requires that the bleach package be installed. Only whitelisted
tags are allowed through for security reasons (see the
raw_html_clean_kwargs arg).
* raw_html_clean_kwargs : dict of keyword args controlling HTML cleaning
Raw HTML will be cleaned to prevent unsafe HTML from ending up in
the table output. This is done by calling ``bleach.clean(data,
**raw_html_clean_kwargs)``. For details on the available options
(e.g. tag whitelist) see:
https://bleach.readthedocs.io/en/latest/clean.html
* parser : Specific HTML parsing library to use
If specified, this specifies which HTML parsing library
BeautifulSoup should use as a backend. The options to choose
from are 'html.parser' (the standard library parser), 'lxml'
(the recommended parser), 'xml' (lxml's XML parser), and
'html5lib'. html5lib is a highly lenient parser and therefore
might work correctly for unusual input if a different parser
fails.
* jsfiles : list of js files to include when writing table.
* cssfiles : list of css files to include when writing table.
* js : js script to include in the body when writing table.
* table_class : css class for the table
"""
_format_name = "html"
_io_registry_format_aliases = ["html"]
_io_registry_suffix = ".html"
_description = "HTML table"
header_class = HTMLHeader
data_class = HTMLData
inputter_class = HTMLInputter
max_ndim = 2 # HTML supports writing 2-d columns with shape (n, m)
def __init__(self, htmldict={}):
"""
Initialize classes for HTML reading and writing.
"""
super().__init__()
self.html = deepcopy(htmldict)
if "multicol" not in htmldict:
self.html["multicol"] = True
if "table_id" not in htmldict:
self.html["table_id"] = 1
self.inputter.html = self.html
def read(self, table):
"""
Read the ``table`` in HTML format and return a resulting ``Table``.
"""
self.outputter = HTMLOutputter()
return super().read(table)
def write(self, table):
"""
Return data in ``table`` converted to HTML as a list of strings.
"""
# Check that table has only 1-d or 2-d columns. Above that fails.
self._check_multidim_table(table)
cols = list(table.columns.values())
self.data.header.cols = cols
self.data.cols = cols
if isinstance(self.data.fill_values, tuple):
self.data.fill_values = [self.data.fill_values]
self.data._set_fill_values(cols)
self.data._set_col_formats()
lines = []
# Set HTML escaping to False for any column in the raw_html_cols input
raw_html_cols = self.html.get("raw_html_cols", [])
if isinstance(raw_html_cols, str):
raw_html_cols = [raw_html_cols] # Allow for a single string as input
cols_escaped = [col.info.name not in raw_html_cols for col in cols]
# Kwargs that get passed on to bleach.clean() if that is available.
raw_html_clean_kwargs = self.html.get("raw_html_clean_kwargs", {})
# Use XMLWriter to output HTML to lines
w = writer.XMLWriter(ListWriter(lines))
with w.tag("html"):
with w.tag("head"):
# Declare encoding and set CSS style for table
with w.tag("meta", attrib={"charset": "utf-8"}):
pass
with w.tag(
"meta",
attrib={
"http-equiv": "Content-type",
"content": "text/html;charset=UTF-8",
},
):
pass
if "css" in self.html:
with w.tag("style"):
w.data(self.html["css"])
if "cssfiles" in self.html:
for filename in self.html["cssfiles"]:
with w.tag(
"link", rel="stylesheet", href=filename, type="text/css"
):
pass
if "jsfiles" in self.html:
for filename in self.html["jsfiles"]:
with w.tag("script", src=filename):
# need this instead of pass to get <script></script>
w.data("")
with w.tag("body"):
if "js" in self.html:
with w.xml_cleaning_method("none"):
with w.tag("script"):
w.data(self.html["js"])
if isinstance(self.html["table_id"], str):
html_table_id = self.html["table_id"]
else:
html_table_id = None
if "table_class" in self.html:
html_table_class = self.html["table_class"]
attrib = {"class": html_table_class}
else:
attrib = {}
with w.tag("table", id=html_table_id, attrib=attrib):
with w.tag("thead"):
with w.tag("tr"):
for col in cols:
if len(col.shape) > 1 and self.html["multicol"]:
# Set colspan attribute for multicolumns
w.start("th", colspan=col.shape[1])
else:
w.start("th")
w.data(col.info.name.strip())
w.end(indent=False)
col_str_iters = []
new_cols_escaped = []
# Make a container to hold any new_col objects created
# below for multicolumn elements. This is purely to
# maintain a reference for these objects during
# subsequent iteration to format column values. This
# requires that the weakref info._parent be maintained.
new_cols = []
for col, col_escaped in zip(cols, cols_escaped):
if len(col.shape) > 1 and self.html["multicol"]:
span = col.shape[1]
for i in range(span):
# Split up multicolumns into separate columns
new_col = Column([el[i] for el in col])
new_col_iter_str_vals = self.fill_values(
col, new_col.info.iter_str_vals()
)
col_str_iters.append(new_col_iter_str_vals)
new_cols_escaped.append(col_escaped)
new_cols.append(new_col)
else:
col_iter_str_vals = self.fill_values(
col, col.info.iter_str_vals()
)
col_str_iters.append(col_iter_str_vals)
new_cols_escaped.append(col_escaped)
for row in zip(*col_str_iters):
with w.tag("tr"):
for el, col_escaped in zip(row, new_cols_escaped):
# Potentially disable HTML escaping for column
method = "escape_xml" if col_escaped else "bleach_clean"
with w.xml_cleaning_method(
method, **raw_html_clean_kwargs
):
w.start("td")
w.data(el.strip())
w.end(indent=False)
# Fixes XMLWriter's insertion of unwanted line breaks
return ["".join(lines)]
def fill_values(self, col, col_str_iters):
"""
Return an iterator of the values with replacements based on fill_values.
"""
# check if the col is a masked column and has fill values
is_masked_column = hasattr(col, "mask")
has_fill_values = hasattr(col, "fill_values")
for idx, col_str in enumerate(col_str_iters):
if is_masked_column and has_fill_values:
if col.mask[idx]:
yield col.fill_values[core.masked]
continue
if has_fill_values:
if col_str in col.fill_values:
yield col.fill_values[col_str]
continue
yield col_str
|
cb9468c4768fe9de4087775a6fb6145df573b1dc397148143a7f344af06b6b29 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""sextractor.py:
Classes to read SExtractor table format.
Built on daophot.py:
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
class SExtractorHeader(core.BaseHeader):
"""Read the header from a file produced by SExtractor."""
comment = r"^\s*#\s*\S\D.*" # Find lines that don't have "# digit"
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a SExtractor
header. The SExtractor header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
"""
# This assumes that the columns are listed in order, one per line with a
# header comment string of the format: "# 1 ID short description [unit]"
# However, some may be missing and must be inferred from skipped column numbers
columns = {}
# E.g. '# 1 ID identification number' (no units) or '# 2 MAGERR magnitude of error [mag]'
# Updated along with issue #4603, for more robust parsing of unit
re_name_def = re.compile(
r"""^\s* \# \s* # possible whitespace around #
(?P<colnumber> [0-9]+)\s+ # number of the column in table
(?P<colname> [-\w]+) # name of the column
# column description, match any character until...
(?:\s+(?P<coldescr> \w .+)
# ...until [non-space][space][unit] or [not-right-bracket][end]
(?:(?<!(\]))$|(?=(?:(?<=\S)\s+\[.+\]))))?
(?:\s*\[(?P<colunit>.+)\])?.* # match units in brackets
""",
re.VERBOSE,
)
dataline = None
for line in lines:
if not line.startswith("#"):
dataline = line # save for later to infer the actual number of columns
break # End of header lines
else:
match = re_name_def.search(line)
if match:
colnumber = int(match.group("colnumber"))
colname = match.group("colname")
coldescr = match.group("coldescr")
# If no units are given, colunit = None
colunit = match.group("colunit")
columns[colnumber] = (colname, coldescr, colunit)
# Handle skipped column numbers
colnumbers = sorted(columns)
        # Handle the case where the last column is array-like by appending a pseudo column.
# If there are more data columns than the largest column number
# then add a pseudo-column that will be dropped later. This allows
# the array column logic below to work in all cases.
if dataline is not None:
n_data_cols = len(dataline.split())
else:
# handles no data, where we have to rely on the last column number
n_data_cols = colnumbers[-1]
# sextractor column number start at 1.
columns[n_data_cols + 1] = (None, None, None)
colnumbers.append(n_data_cols + 1)
if len(columns) > 1:
            # only fill in skipped columns when there is a genuine column initially
previous_column = 0
for n in colnumbers:
if n != previous_column + 1:
for c in range(previous_column + 1, n):
column_name = (
columns[previous_column][0] + f"_{c - previous_column}"
)
column_descr = columns[previous_column][1]
column_unit = columns[previous_column][2]
columns[c] = (column_name, column_descr, column_unit)
previous_column = n
# Add the columns in order to self.names
colnumbers = sorted(columns)[:-1] # drop the pseudo column
self.names = []
for n in colnumbers:
self.names.append(columns[n][0])
if not self.names:
raise core.InconsistentTableError(
"No column names found in SExtractor header"
)
self.cols = []
for n in colnumbers:
col = core.Column(name=columns[n][0])
col.description = columns[n][1]
col.unit = columns[n][2]
self.cols.append(col)
class SExtractorData(core.BaseData):
start_line = 0
delimiter = " "
comment = r"\s*#"
class SExtractor(core.BaseReader):
"""SExtractor format table.
SExtractor is a package for faint-galaxy photometry (Bertin & Arnouts
1996, A&A Supp. 317, 393.)
See: https://sextractor.readthedocs.io/en/latest/
Example::
# 1 NUMBER
# 2 ALPHA_J2000
# 3 DELTA_J2000
# 4 FLUX_RADIUS
# 7 MAG_AUTO [mag]
# 8 X2_IMAGE Variance along x [pixel**2]
# 9 X_MAMA Barycenter position along MAMA x axis [m**(-6)]
# 10 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)]
1 32.23222 10.1211 0.8 1.2 1.4 18.1 1000.0 0.00304 -3.498
2 38.12321 -88.1321 2.2 2.4 3.1 17.0 1500.0 0.00908 1.401
Note the skipped numbers since flux_radius has 3 columns. The three
FLUX_RADIUS columns will be named FLUX_RADIUS, FLUX_RADIUS_1, FLUX_RADIUS_2
Also note that a post-ID description (e.g. "Variance along x") is optional
and that units may be specified at the end of a line in brackets.
"""
_format_name = "sextractor"
_io_registry_can_write = False
_description = "SExtractor format table"
header_class = SExtractorHeader
data_class = SExtractorData
inputter_class = core.ContinuationLinesInputter
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super().read(table)
# remove the comments
if "comments" in out.meta:
del out.meta["comments"]
return out
def write(self, table):
raise NotImplementedError
|
db64d64d72e032b6975da362dbd55f875f10942fbb9c868b14f96c5de06f2cb7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing QDP tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import copy
import re
import warnings
from collections.abc import Iterable
import numpy as np
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core
def _line_type(line, delimiter=None):
"""Interpret a QDP file line.
Parameters
----------
line : str
a single line of the file
Returns
-------
type : str
Line type: "comment", "command", or "data"
Examples
--------
>>> _line_type("READ SERR 3")
'command'
>>> _line_type(" \\n !some gibberish")
'comment'
>>> _line_type(" ")
'comment'
>>> _line_type(" 21345.45")
'data,1'
>>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan")
'data,6'
>>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',')
'data,6'
>>> _line_type(" 21345.45 ! a comment to disturb")
'data,1'
>>> _line_type("NO NO NO NO NO")
'new'
>>> _line_type("NO,NO,NO,NO,NO", delimiter=',')
'new'
>>> _line_type("N O N NOON OON O")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
>>> _line_type(" some non-comment gibberish")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
"""
_decimal_re = r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?"
_command_re = r"READ [TS]ERR(\s+[0-9]+)+"
sep = delimiter
if delimiter is None:
sep = r"\s+"
_new_re = rf"NO({sep}NO)+"
_data_re = rf"({_decimal_re}|NO|[-+]?nan)({sep}({_decimal_re}|NO|[-+]?nan))*)"
_type_re = rf"^\s*((?P<command>{_command_re})|(?P<new>{_new_re})|(?P<data>{_data_re})?\s*(\!(?P<comment>.*))?\s*$"
_line_type_re = re.compile(_type_re)
line = line.strip()
if not line:
return "comment"
match = _line_type_re.match(line)
if match is None:
raise ValueError(f"Unrecognized QDP line: {line}")
for type_, val in match.groupdict().items():
if val is None:
continue
if type_ == "data":
return f"data,{len(val.split(sep=delimiter))}"
else:
return type_
def _get_type_from_list_of_lines(lines, delimiter=None):
"""Read through the list of QDP file lines and label each line by type.
Parameters
----------
lines : list
List containing one file line in each entry
Returns
-------
contents : list
        List containing the type for each line (see `_line_type`)
ncol : int
The number of columns in the data lines. Must be the same throughout
the file
Examples
--------
>>> line0 = "! A comment"
>>> line1 = "543 12 456.0"
>>> lines = [line0, line1]
>>> types, ncol = _get_type_from_list_of_lines(lines)
>>> types[0]
'comment'
>>> types[1]
'data,3'
>>> ncol
3
>>> lines.append("23")
>>> _get_type_from_list_of_lines(lines)
Traceback (most recent call last):
...
ValueError: Inconsistent number of columns
"""
types = [_line_type(line, delimiter=delimiter) for line in lines]
current_ncol = None
for type_ in types:
if type_.startswith("data,"):
ncol = int(type_[5:])
if current_ncol is None:
current_ncol = ncol
elif ncol != current_ncol:
raise ValueError("Inconsistent number of columns")
return types, current_ncol
def _get_lines_from_file(qdp_file):
if "\n" in qdp_file:
lines = qdp_file.split("\n")
elif isinstance(qdp_file, str):
with open(qdp_file) as fobj:
lines = [line.strip() for line in fobj.readlines()]
elif isinstance(qdp_file, Iterable):
lines = qdp_file
else:
raise ValueError("invalid value of qdb_file")
return lines
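# Illustrative: the three accepted forms of ``qdp_file``::
#
#     _get_lines_from_file("READ SERR 2\n1 0.5 0.1")         # full table text
#     _get_lines_from_file("example.qdp")                     # file name (hypothetical)
#     _get_lines_from_file(["READ SERR 2", "1 0.5 0.1"])      # iterable of lines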
def _interpret_err_lines(err_specs, ncols, names=None):
"""Give list of column names from the READ SERR and TERR commands.
Parameters
----------
err_specs : dict
``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}``
Error specifications for symmetric and two-sided errors
ncols : int
Number of data columns
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
Returns
-------
colnames : list
List containing the column names. Error columns will have the name
of the main column plus ``_err`` for symmetric errors, and ``_perr``
and ``_nerr`` for positive and negative errors respectively
Examples
--------
>>> col_in = ['MJD', 'Rate']
>>> cols = _interpret_err_lines(None, 2, names=col_in)
>>> cols[0]
'MJD'
>>> err_specs = {'terr': [1], 'serr': [2]}
>>> ncols = 5
>>> cols = _interpret_err_lines(err_specs, ncols, names=col_in)
>>> cols[0]
'MJD'
>>> cols[2]
'MJD_nerr'
>>> cols[4]
'Rate_err'
>>> _interpret_err_lines(err_specs, 6, names=col_in)
Traceback (most recent call last):
...
ValueError: Inconsistent number of input colnames
"""
colnames = ["" for i in range(ncols)]
if err_specs is None:
serr_cols = terr_cols = []
else:
# I don't want to empty the original one when using `pop` below
err_specs = copy.deepcopy(err_specs)
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if names is not None:
all_error_cols = len(serr_cols) + len(terr_cols) * 2
if all_error_cols + len(names) != ncols:
raise ValueError("Inconsistent number of input colnames")
shift = 0
for i in range(ncols):
col_num = i + 1 - shift
if colnames[i] != "":
continue
colname_root = f"col{col_num}"
if names is not None:
colname_root = names[col_num - 1]
colnames[i] = f"{colname_root}"
if col_num in serr_cols:
colnames[i + 1] = f"{colname_root}_err"
shift += 1
continue
if col_num in terr_cols:
colnames[i + 1] = f"{colname_root}_perr"
colnames[i + 2] = f"{colname_root}_nerr"
shift += 2
continue
assert not np.any([c == "" for c in colnames])
return colnames
def _get_tables_from_qdp_file(qdp_file, input_colnames=None, delimiter=None):
"""Get all tables from a QDP file.
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
input_colnames : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
delimiter : str
Delimiter for the values in the table.
Returns
-------
list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
lines = _get_lines_from_file(qdp_file)
contents, ncol = _get_type_from_list_of_lines(lines, delimiter=delimiter)
table_list = []
err_specs = {}
colnames = None
comment_text = ""
initial_comments = ""
command_lines = ""
current_rows = None
for line, datatype in zip(lines, contents):
line = line.strip().lstrip("!")
# Is this a comment?
if datatype == "comment":
comment_text += line + "\n"
continue
if datatype == "command":
# The first time I find commands, I save whatever comments were
# collected so far into the initial comments.
if command_lines == "":
initial_comments = comment_text
comment_text = ""
if err_specs != {}:
warnings.warn(
"This file contains multiple command blocks. Please verify",
AstropyUserWarning,
)
command_lines += line + "\n"
continue
if datatype.startswith("data"):
# The first time I find data, I define err_specs
if err_specs == {} and command_lines != "":
for cline in command_lines.strip().split("\n"):
command = cline.strip().split()
# This should never happen, but just in case.
if len(command) < 3:
continue
err_specs[command[1].lower()] = [int(c) for c in command[2:]]
if colnames is None:
colnames = _interpret_err_lines(err_specs, ncol, names=input_colnames)
if current_rows is None:
current_rows = []
values = []
for v in line.split(delimiter):
if v == "NO":
values.append(np.ma.masked)
else:
# Understand if number is int or float
try:
values.append(int(v))
except ValueError:
values.append(float(v))
current_rows.append(values)
continue
if datatype == "new":
# Save table to table_list and reset
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split(
"\n"
)
new_table.meta["comments"] = comment_text.strip().split("\n")
# Reset comments
comment_text = ""
table_list.append(new_table)
current_rows = None
continue
# At the very end, if there is still a table being written, let's save
# it to the table_list
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split("\n")
new_table.meta["comments"] = comment_text.strip().split("\n")
table_list.append(new_table)
return table_list
def _understand_err_col(colnames):
"""Get which column names are error columns.
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith("_nerr"):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith("_perr"):
raise ValueError("Missing positive error")
return serr, terr
def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
"""Read a table from a QDP file.
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
table_id : int, default 0
Number of the table to be read from the QDP file. This is useful
when multiple tables are present in the file. By default, the first is read.
delimiter : str
Any delimiter accepted by the `sep` argument of str.split()
Returns
-------
tables : list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
if table_id is None:
warnings.warn(
"table_id not specified. Reading the first available table",
AstropyUserWarning,
)
table_id = 0
tables = _get_tables_from_qdp_file(
qdp_file, input_colnames=names, delimiter=delimiter
)
return tables[table_id]
def _write_table_qdp(table, filename=None, err_specs=None):
"""Write a table to a QDP file.
Parameters
----------
table : :class:`~astropy.table.Table`
Input table to be written
filename : str
Output QDP file name
Other Parameters
----------------
err_specs : dict
Dictionary of the format {'serr': [1], 'terr': [2, 3]}, specifying
which columns have symmetric and two-sided errors (see QDP format
specification)
"""
import io
fobj = io.StringIO()
if "initial_comments" in table.meta and table.meta["initial_comments"] != []:
for line in table.meta["initial_comments"]:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
if err_specs is None:
serr_cols, terr_cols = _understand_err_col(table.colnames)
else:
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if serr_cols != []:
col_string = " ".join([str(val) for val in serr_cols])
print(f"READ SERR {col_string}", file=fobj)
if terr_cols != []:
col_string = " ".join([str(val) for val in terr_cols])
print(f"READ TERR {col_string}", file=fobj)
if "comments" in table.meta and table.meta["comments"] != []:
for line in table.meta["comments"]:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
colnames = table.colnames
print("!" + " ".join(colnames), file=fobj)
for row in table:
values = []
for val in row:
if not np.ma.is_masked(val):
rep = str(val)
else:
rep = "NO"
values.append(rep)
print(" ".join(values), file=fobj)
full_string = fobj.getvalue()
fobj.close()
if filename is not None:
with open(filename, "w") as fobj:
print(full_string, file=fobj)
return full_string.split("\n")
class QDPSplitter(core.DefaultSplitter):
"""
Split on space for QDP tables.
"""
delimiter = " "
class QDPHeader(basic.CommentedHeaderHeader):
"""
Header that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`.
"""
splitter_class = QDPSplitter
comment = "!"
write_comment = "!"
class QDPData(basic.BasicData):
"""
Data that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`.
"""
splitter_class = QDPSplitter
fill_values = [(core.masked, "NO")]
comment = "!"
write_comment = None
class QDP(basic.Basic):
"""Quick and Dandy Plot table.
Example::
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b be c d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b be c d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
The input table above contains some initial comments, the error commands,
then two tables.
This file format can contain multiple tables, separated by a line full
of ``NO``s. Comment lines start with exclamation marks, and missing values
are single ``NO`` entries. The delimiter is usually whitespace, more rarely a comma.
The QDP format differentiates between data and error columns. The table
above has commands::
READ TERR 1
READ SERR 3
which mean that after data column 1 there will be two error columns
containing its positive and negative error bars, then data column 2 without
error bars, then column 3, then a column with the symmetric error of column
3, then the remaining data columns.
As explained below, table headers are highly inconsistent. Possible
comments containing column names will be ignored and columns will be called
``col1``, ``col2``, etc. unless the user specifies their names with the
``names=`` keyword argument.
When passing column names, pass **only the names of the data columns, not
the error columns.**
Error information will be encoded in the names of the table columns.
(e.g. ``a_perr`` and ``a_nerr`` for the positive and negative error of
column ``a``, ``b_err`` the symmetric error of column ``b``.)
When writing tables to this format, users can pass an ``err_specs`` keyword
passing a dictionary ``{'serr': [3], 'terr': [1, 2]}``, meaning that data
columns 1 and 2 will have two additional columns each with their positive
and negative errors, and data column 3 will have an additional column with
a symmetric error (just like the ``READ SERR`` and ``READ TERR`` commands
above).
Headers are just comments, and tables distributed by various missions
can differ greatly in their use of conventions. For example, light curves
distributed by the Swift-Gehrels mission have an extra space in one header
entry that makes the number of labels inconsistent with the number of cols.
For this reason, we ignore the comments that might encode the column names
and leave the name specification to the user.
Example::
> Extra space
> |
> v
>! MJD Err (pos) Err(neg) Rate Error
>53000.123456 2.378e-05 -2.378472e-05 NO 0.212439
These readers and writer classes will strive to understand which of the
comments belong to all the tables, and which ones to each single table.
General comments will be stored in the ``initial_comments`` meta of each
table. The comments of each table will be stored in the ``comments`` meta.
Example::
t = Table.read(example_qdp, format='ascii.qdp', table_id=1, names=['a', 'b', 'c', 'd'])
reads the second table (``table_id=1``) in file ``example.qdp`` containing
the table above. There are four column names but seven data columns, why?
Because the ``READ SERR`` and ``READ TERR`` commands say that there are
three error columns.
``t.meta['initial_comments']`` will contain the initial two comment lines
in the file, while ``t.meta['comments']`` will contain ``Table 1 comment``
The table can be written to another file, preserving the same information,
as::
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
Note how the ``terr`` and ``serr`` commands are passed to the writer.
"""
_format_name = "qdp"
_io_registry_can_write = True
_io_registry_suffix = ".qdp"
_description = "Quick and Dandy Plotter"
header_class = QDPHeader
data_class = QDPData
def __init__(self, table_id=None, names=None, err_specs=None, sep=None):
super().__init__()
self.table_id = table_id
self.names = names
self.err_specs = err_specs
self.delimiter = sep
def read(self, table):
self.lines = self.inputter.get_lines(table, newline="\n")
return _read_table_qdp(
self.lines,
table_id=self.table_id,
names=self.names,
delimiter=self.delimiter,
)
def write(self, table):
self._check_multidim_table(table)
lines = _write_table_qdp(table, err_specs=self.err_specs)
return lines
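# A minimal usage sketch (hedged) for this reader/writer through the unified
# Table interface; the file name and column names below are placeholders:
#
#     >>> from astropy.table import Table
#     >>> t = Table.read("example.qdp", format="ascii.qdp", table_id=1,
#     ...                names=["a", "b", "c", "d"])  # doctest: +SKIP
#     >>> t.write("copy.qdp", format="ascii.qdp",
#     ...         err_specs={"terr": [1], "serr": [3]})  # doctest: +SKIP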
|
93da0dc5d05063d5db3ccbb710f03d8378a3f7bd6a14795a112a26e8695c6d64 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import collections
import contextlib
import copy
import os
import re
import sys
import time
import warnings
from io import StringIO
import numpy as np
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import (
basic,
cds,
core,
cparser,
daophot,
ecsv,
fastbasic,
fixedwidth,
html,
ipac,
latex,
mrt,
rst,
sextractor,
)
from .docs import READ_KWARG_TYPES, WRITE_KWARG_TYPES
_read_trace = []
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
table = table[: i + 1]
break
table = os.linesep.join(table)
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(
r"( http[s]? | ftp | file ) :// .+ \.htm[l]?$",
table,
re.IGNORECASE | re.VERBOSE,
):
return True
# Filename ending in .htm or .html which exists
if re.search(r"\.htm[l]?$", table[-5:], re.IGNORECASE) and os.path.exists(
os.path.expanduser(table)
):
return True
# Table starts with HTML document type declaration
if re.match(r"\s* <! \s* DOCTYPE \s* HTML", table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(
re.search(rf"< \s* {element} [^>]* >", table, re.IGNORECASE | re.VERBOSE)
for element in ("table", "tr", "td")
):
return True
return False
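# Illustrative behaviour sketch of the heuristic above (the inputs are
# made-up strings):
#
#     >>> _probably_html("<table><tr><td>1</td></tr></table>")
#     True
#     >>> _probably_html("a b c\n1 2 3")
#     False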
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read().
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
"""
global _GUESS
_GUESS = guess
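# Hedged example: disable format guessing globally for subsequent read() calls.
#
#     >>> from astropy.io import ascii
#     >>> ascii.set_guess(False)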
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dict of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : tuple, list of tuple
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader["enable"] == "force":
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError(f"Cannot supply both format and {label} keywords")
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError(
"ASCII format {!r} not in allowed list {}".format(
format, sorted(core.FORMAT_CLASSES)
)
)
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
"""
fast_reader = copy.deepcopy(kwargs.get("fast_reader", True))
if isinstance(fast_reader, dict):
fast_reader.setdefault("enable", "force")
else:
fast_reader = {"enable": fast_reader}
return fast_reader
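# Sketch of the normalisation above (illustrative values):
#
#     >>> _get_fast_reader_dict({})
#     {'enable': True}
#     >>> _get_fast_reader_dict({"fast_reader": {"chunk_size": 100}})
#     {'chunk_size': 100, 'enable': 'force'}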
def _validate_read_write_kwargs(read_write, **kwargs):
"""Validate types of keyword arg inputs to read() or write()."""
def is_ducktype(val, cls):
"""Check if ``val`` is an instance of ``cls`` or "seems" like one:
``cls(val) == val`` does not raise an exception and is `True`. In
this way you can pass in ``np.int16(2)`` and have that count as `int`.
This has a special-case of ``cls`` being 'list-like', meaning it is
an iterable but not a string.
"""
if cls == "list-like":
ok = not isinstance(val, str) and isinstance(val, collections.abc.Iterable)
else:
ok = isinstance(val, cls)
if not ok:
# See if ``val`` walks and quacks like a ``cls``.
try:
new_val = cls(val)
assert new_val == val
except Exception:
ok = False
else:
ok = True
return ok
kwarg_types = READ_KWARG_TYPES if read_write == "read" else WRITE_KWARG_TYPES
for arg, val in kwargs.items():
# Kwarg type checking is opt-in, so kwargs not in the list are considered OK.
# This reflects that some readers allow additional arguments that may not
# be well-specified, e.g. ``__init__(self, **kwargs)`` is an option.
if arg not in kwarg_types or val is None:
continue
# Single type or tuple of types for this arg (like isinstance())
types = kwarg_types[arg]
err_msg = (
f"{read_write}() argument '{arg}' must be a "
f"{types} object, got {type(val)} instead"
)
# Force `types` to be a tuple for the any() check below
if not isinstance(types, tuple):
types = (types,)
if not any(is_ducktype(val, cls) for cls in types):
raise TypeError(err_msg)
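# Hedged usage sketch; this assumes 'delimiter' is registered as a str kwarg
# in READ_KWARG_TYPES, so a non-string value raises TypeError:
#
#     >>> _validate_read_write_kwargs("read", delimiter=",")  # passes silently
#     >>> _validate_read_write_kwargs("read", delimiter=1)    # doctest: +SKIP
#     Traceback (most recent call last):
#     ...
#     TypeError: read() argument 'delimiter' must be a ...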
def _expand_user_if_path(argument):
if isinstance(argument, (str, bytes, os.PathLike)):
# For the `read()` method, a `str` input can be either a file path or
# the table data itself. File names for io.ascii cannot have newlines
# in them and io.ascii does not accept table data as `bytes`, so we can
# attempt to detect data strings like this.
is_str_data = isinstance(argument, str) and (
"\n" in argument or "\r" in argument
)
if not is_str_data:
# Remain conservative in expanding the presumed-path
ex_user = os.path.expanduser(argument)
if os.path.exists(ex_user):
argument = ex_user
return argument
def read(table, guess=None, **kwargs):
# This is the final output from reading. Static analysis indicates the reading
# logic (which is indeed complex) might not define `dat`, thus do so here.
dat = None
# Docstring defined below
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
_validate_read_write_kwargs("read", **kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs["fast_reader"] = fast_reader
if fast_reader["enable"] and fast_reader.get("chunk_size"):
return _read_in_chunks(table, **kwargs)
if "fill_values" not in kwargs:
kwargs["fill_values"] = [("", "0")]
# If an Outputter is supplied in kwargs that will take precedence.
if (
"Outputter" in kwargs
): # user specified Outputter, not supported for fast reading
fast_reader["enable"] = False
format = kwargs.get("format")
# Dictionary arguments are passed by reference by default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs["fast_reader"] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get("Reader"), "Reader")
if Reader is not None:
new_kwargs["Reader"] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if "format" in new_kwargs:
del new_kwargs["format"]
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs["guess_html"] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. An exception is when a `readme` arg was passed, which implies
# CDS format; in that case the original `table` (the data filename) must
# be left intact.
if "readme" not in new_kwargs:
encoding = kwargs.get("encoding")
try:
table = _expand_user_if_path(table)
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r"[\r\n]", table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs["guess_html"]:
new_kwargs["guess_html"] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
table = _expand_user_if_path(table)
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader["enable"] and f"fast_{format}" in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs["Reader"] = core.FAST_CLASSES[f"fast_{format}"]
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(fast_kwargs),
"Reader": fast_reader_rdr.__class__,
"status": "Success with fast reader (no guessing)",
}
)
except (
core.ParameterError,
cparser.CParserError,
UnicodeEncodeError,
) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader["enable"] == "force":
raise core.InconsistentTableError(
f"fast reader {fast_reader_rdr.__class__} exception: {err}"
)
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(new_kwargs),
"Reader": reader.__class__,
"status": (
"Success with slow reader after failing"
" with fast (no guessing)"
),
}
)
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(new_kwargs),
"Reader": reader.__class__,
"status": "Success with specified Reader class (no guessing)",
}
)
# Static analysis (pyright) indicates `dat` might be left undefined, so just
# to be sure define it at the beginning and check here.
if dat is None:
raise RuntimeError(
"read() function failed due to code logic error, "
"please report this bug on github"
)
return dat
read.__doc__ = core.READ_DOCSTRING
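# Hedged usage sketch of the public read() entry point (file name is a
# placeholder); disabling guessing and the fast reader gives a clearer
# traceback when a read fails:
#
#     >>> from astropy.io import ascii
#     >>> dat = ascii.read("table.dat", format="basic", guess=False,
#     ...                  fast_reader=False)  # doctest: +SKIP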
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
"""
# Keep a trace of the kwargs for all failed guesses
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (
fast_reader["enable"]
and format is not None
and f"fast_{format}" in core.FAST_CLASSES
):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs["Reader"] = core.FAST_CLASSES[f"fast_{format}"]
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get("fast_reader")
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (
fast_reader["enable"] is False
and guess_kwargs["Reader"] in core.FAST_CLASSES.values()
):
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": guess_kwargs["Reader"].__class__,
"status": "Disabled: reader only available in fast version",
"dt": f"{0.0:.3f} ms",
}
)
continue
# If user required a fast reader then skip all non-fast readers
if (
fast_reader["enable"] == "force"
and guess_kwargs["Reader"] not in core.FAST_CLASSES.values()
):
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": guess_kwargs["Reader"].__class__,
"status": "Disabled: no fast version of reader available",
"dt": f"{0.0:.3f} ms",
}
)
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
# Do guess_kwargs.update(read_kwargs) except that if guess_kwargs has
# a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (
core.InconsistentTableError,
ValueError,
TypeError,
AttributeError,
core.OptionalTableImportError,
core.ParameterError,
cparser.CParserError,
)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
# If guessing will try all Readers then use strict requirements on column names
if "Reader" not in read_kwargs:
guess_kwargs["strict_names"] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": reader.__class__,
"status": "Success (guessing)",
"dt": f"{(time.time() - t0) * 1000:.3f} ms",
}
)
return dat
except guess_exception_classes as err:
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"status": f"{err.__class__.__name__}: {str(err)}",
"dt": f"{(time.time() - t0) * 1000:.3f} ms",
}
)
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(read_kwargs),
"Reader": reader.__class__,
"status": (
"Success with original kwargs without strict_names (guessing)"
),
}
)
return dat
except guess_exception_classes as err:
_read_trace.append(
{
"kwargs": copy.deepcopy(read_kwargs),
"status": f"{err.__class__.__name__}: {str(err)}",
}
)
failed_kwargs.append(read_kwargs)
lines = [
"\nERROR: Unable to guess table format with the guesses listed below:"
]
for kwargs in failed_kwargs:
sorted_keys = sorted(
x for x in sorted(kwargs) if x not in ("Reader", "Outputter")
)
reader_repr = repr(kwargs.get("Reader", basic.Basic))
keys_vals = ["Reader:" + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend([f"{key}: {val!r}" for key, val in kwargs_sorted])
lines.append(" ".join(keys_vals))
msg = [
"",
"************************************************************************",
"** ERROR: Unable to guess table format with the guesses listed above. **",
"** **",
"** To figure out why the table did not read, use guess=False and **",
"** fast_reader=False, along with any appropriate arguments to read(). **",
"** In particular specify the format and any known attributes like the **",
"** delimiter. **",
"************************************************************************",
]
lines.extend(msg)
raise core.InconsistentTableError("\n".join(lines)) from None
def _get_guess_kwargs_list(read_kwargs):
"""Get the full list of reader keyword argument dicts.
These are the basis for the format guessing process.
The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop("guess_html", None):
guess_kwargs_list.append(dict(Reader=html.HTML))
# Start with ECSV because an ECSV file will be read by Basic. This format
# has very specific header requirements and fails out quickly.
guess_kwargs_list.append(dict(Reader=ecsv.Ecsv))
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (
fixedwidth.FixedWidthTwoLine,
rst.RST,
fastbasic.FastBasic,
basic.Basic,
fastbasic.FastRdb,
basic.Rdb,
fastbasic.FastTab,
basic.Tab,
cds.Cds,
mrt.Mrt,
daophot.Daophot,
sextractor.SExtractor,
ipac.Ipac,
latex.Latex,
latex.AASTex,
):
guess_kwargs_list.append(dict(Reader=reader))
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (
fastbasic.FastCommentedHeader,
basic.CommentedHeader,
fastbasic.FastBasic,
basic.Basic,
fastbasic.FastNoHeader,
basic.NoHeader,
):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(
dict(Reader=Reader, delimiter=delimiter, quotechar=quotechar)
)
return guess_kwargs_list
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
"""
fast_reader = kwargs["fast_reader"]
chunk_size = fast_reader.pop("chunk_size")
chunk_generator = fast_reader.pop("chunk_generator", False)
fast_reader["parallel"] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ("S", "U")
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta, copy=False)
return out
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
for input file-like object, see #6460.
"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if isinstance(table, str) and ("\n" in table or "\r" in table):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, "read") and hasattr(table, "seek"):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs["fast_reader"]["return_header_chars"] = True
header = "" # Table header (up to start of data)
prev_chunk_chars = "" # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get("encoding")) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r"\S", chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == "\n":
break
else:
raise ValueError("no newline found in chunk (chunk_size too small?)")
# Stick on the header to the chunk part up to (and including) the
# last newline. Make sure the small strings are concatenated first.
complete_chunk = (header + prev_chunk_chars) + chunk[: idx + 1]
prev_chunk_chars = chunk[idx + 1 :]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop("__ascii_fast_reader_header_chars__")
first_chunk = False
yield tbl
if final_chunk:
break
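# Hedged sketch of chunked reading with the fast reader (file name and chunk
# size are illustrative); chunk_generator=True yields one Table per chunk:
#
#     >>> from astropy.io import ascii
#     >>> dat = ascii.read("big_table.csv", format="csv",
#     ...                  fast_reader={"chunk_size": 100_000_000})  # doctest: +SKIP
#     >>> chunks = ascii.read("big_table.csv", format="csv",
#     ...                     fast_reader={"chunk_size": 100_000_000,
#     ...                                  "chunk_generator": True})  # doctest: +SKIP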
extra_writer_pars = (
"delimiter",
"comment",
"quotechar",
"formats",
"names",
"include_names",
"exclude_names",
"strip_whitespace",
)
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if "strip_whitespace" not in kwargs:
kwargs["strip_whitespace"] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if isinstance(
writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader)
) and not isinstance(kwargs.get("comment", ""), str):
raise ValueError(
"for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing."
)
return writer
def write(
table,
output=None,
format=None,
Writer=None,
fast_writer=True,
*,
overwrite=False,
**kwargs,
):
# Docstring inserted below
_validate_read_write_kwargs(
"write", format=format, fast_writer=fast_writer, overwrite=overwrite, **kwargs
)
if isinstance(output, (str, bytes, os.PathLike)):
output = os.path.expanduser(output)
if not overwrite and os.path.lexists(output):
raise OSError(NOT_OVERWRITING_MSG.format(output))
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get("names")
if isinstance(table, Table):
# While we are only going to read data from columns, we may need to
# to adjust info attributes such as format, so we make a shallow copy.
table = table.__class__(table, names=names, copy=False)
else:
# Otherwise, create a table from the input.
table = Table(table, names=names, copy=False)
table0 = table[:0].copy()
core._apply_include_exclude_names(
table0,
kwargs.get("names"),
kwargs.get("include_names"),
kwargs.get("exclude_names"),
)
diff_format_with_names = set(kwargs.get("formats", [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
"The key(s) {} specified in the formats argument do not match a column"
" name.".format(diff_format_with_names),
AstropyWarning,
)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, "Writer")
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
if not hasattr(output, "write"):
# NOTE: we need to specify newline='', otherwise the default
# behavior is for Python to translate \r\n (which we write because
# of os.linesep) into \r\r\n. Specifying newline='' disables any
# auto-translation.
output = open(output, "w", newline="")
output.write(outstr)
output.write(os.linesep)
output.close()
else:
output.write(outstr)
output.write(os.linesep)
write.__doc__ = core.WRITE_DOCSTRING
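# Hedged usage sketch of the public write() entry point (output file name is
# a placeholder):
#
#     >>> from astropy.io import ascii
#     >>> from astropy.table import Table
#     >>> t = Table({"a": [1, 2], "b": [3.0, 4.0]})
#     >>> ascii.write(t, "out.csv", format="csv", overwrite=True)  # doctest: +SKIP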
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dict
Ordered list of format guesses and status
"""
return copy.deepcopy(_read_trace)
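# Hedged example: inspect the format guesses attempted by the last read():
#
#     >>> from astropy.io import ascii
#     >>> dat = ascii.read("table.dat")          # doctest: +SKIP
#     >>> for entry in ascii.get_read_trace():   # doctest: +SKIP
#     ...     print(entry["status"])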
|
874fd27dfec6a0e68aa8905a8a0d48da7b654375e7705f09d96492e6be5afb87 | """A Collection of useful miscellaneous functions.
misc.py:
Collection of useful miscellaneous functions.
:Author: Hannes Breytenbach ([email protected])
"""
import collections.abc
import itertools
import operator
def first_true_index(iterable, pred=None, default=None):
"""find the first index position for the which the callable pred returns True."""
if pred is None:
func = operator.itemgetter(1)
else:
func = lambda x: pred(x[1])
# either index-item pair or default
ii = next(filter(func, enumerate(iterable)), default)
return ii[0] if ii else default
def first_false_index(iterable, pred=None, default=None):
"""find the first index position for the which the callable pred returns False."""
if pred is None:
func = operator.not_
else:
func = lambda x: not pred(x)
return first_true_index(iterable, func, default)
def sortmore(*args, **kw):
"""
Sorts any number of lists according to:
optionally given item sorting key function(s) and/or a global sorting key function.
Parameters
----------
One or more lists
Keywords
--------
globalkey : None
revert to sorting by key function
globalkey : callable
Sort by evaluated value for all items in the lists
(call signature of this function needs to be such that it accepts an
argument tuple of items from each list).
e.g.: ``globalkey = lambda *l: sum(l)`` will order all the lists by the
sum of the items from each list.
if key: None
sorting done by value of first input list
(in this case the objects in the first iterable need the comparison
methods __lt__ etc...)
if key: callable
sorting done by value of key(item) for items in first iterable
if key: tuple
sorting done by value of (key(item_0), ..., key(item_n)) for items in
the first n iterables (where n is the length of the key tuple)
i.e. the first callable is the primary sorting criterion, and the
rest act as tie-breakers.
Returns
-------
Sorted lists
Examples
--------
Capture sorting indices::
l = list('CharacterS')
In [1]: sortmore( l, range(len(l)) )
Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
[0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
In [2]: sortmore( l, range(len(l)), key=str.lower )
Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
[2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
"""
first = list(args[0])
if not len(first):
return args
globalkey = kw.get("globalkey")
key = kw.get("key")
if key is None:
if globalkey:
# if global sort function given and no local (secondary) key given, ==> no tiebreakers
key = lambda x: 0
else:
# if no global sort and no local sort keys given, sort by item values
key = lambda x: x
if globalkey is None:
globalkey = lambda *x: 0
if not isinstance(globalkey, collections.abc.Callable):
raise ValueError("globalkey needs to be callable")
if isinstance(key, collections.abc.Callable):
k = lambda x: (globalkey(*x), key(x[0]))
elif isinstance(key, tuple):
key = (k if k else lambda x: 0 for k in key)
k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
else:
raise KeyError(
"kw arg 'key' should be None, callable, or a sequence of callables, not {}".format(
type(key)
)
)
res = sorted(list(zip(*args)), key=k)
if "order" in kw:
if kw["order"].startswith(("descend", "reverse")):
res = reversed(res)
return tuple(map(list, zip(*res)))
def groupmore(func=None, *its):
"""Extends the itertools.groupby functionality to arbitrary number of iterators."""
if not func:
func = lambda x: x
its = sortmore(*its, key=func)
nfunc = lambda x: func(x[0])
zipper = itertools.groupby(zip(*its), nfunc)
unzipper = ((key, zip(*groups)) for key, groups in zipper)
return unzipper
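# Hedged example of grouping two parallel lists by a key evaluated on the
# first one (values are made up):
#
#     >>> names = ["b", "aa", "cc"]
#     >>> vals = [1, 2, 3]
#     >>> for key, groups in groupmore(len, names, vals):
#     ...     print(key, [list(g) for g in groups])
#     1 [['b'], [1]]
#     2 [['aa', 'cc'], [2, 3]]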
|
c5f522e7462d8778be7494846b43db5552e6ed314b673e4dfd4791766b30c2ec | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
An extensible ASCII table reader and writer.
Classes to read DAOphot table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import itertools as itt
import re
from collections import OrderedDict, defaultdict
import numpy as np
from . import core, fixedwidth
from .misc import first_false_index, first_true_index, groupmore
class DaophotHeader(core.BaseHeader):
"""
Read the header from a file produced by the IRAF DAOphot routine.
"""
comment = r"\s*#K"
# Regex for extracting the format strings
re_format = re.compile(r"%-?(\d+)\.?\d?[sdfg]")
re_header_keyword = re.compile(
r"[#]K" r"\s+ (?P<name> \w+)" r"\s* = (?P<stuff> .+) $", re.VERBOSE
)
aperture_values = ()
def __init__(self):
core.BaseHeader.__init__(self)
def parse_col_defs(self, grouped_lines_dict):
"""Parse a series of column definition lines.
Examples
--------
When parsing, there may be several such blocks in a single file
(where continuation characters have already been stripped).
#N ID XCENTER YCENTER MAG MERR MSKY NITER
#U ## pixels pixels magnitudes magnitudes counts ##
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
"""
line_ids = ("#N", "#U", "#F")
coldef_dict = defaultdict(list)
# Function to strip identifier lines
stripper = lambda s: s[2:].strip(" \\")
for defblock in zip(*map(grouped_lines_dict.get, line_ids)):
for key, line in zip(line_ids, map(stripper, defblock)):
coldef_dict[key].append(line.split())
# Save the original columns so we can use it later to reconstruct the
# original header for writing
if self.data.is_multiline:
# Database contains multi-aperture data.
# Autogen column names, units, formats from last row of column headers
last_names, last_units, last_formats = list(
zip(*map(coldef_dict.get, line_ids))
)[-1]
N_multiline = len(self.data.first_block)
for i in np.arange(1, N_multiline + 1).astype("U2"):
# extra column names eg. RAPERT2, SUM2 etc...
extended_names = list(map("".join, zip(last_names, itt.repeat(i))))
if i == "1": # Enumerate the names starting at 1
coldef_dict["#N"][-1] = extended_names
else:
coldef_dict["#N"].append(extended_names)
coldef_dict["#U"].append(last_units)
coldef_dict["#F"].append(last_formats)
# Get column widths from column format specifiers
get_col_width = lambda s: int(self.re_format.search(s).groups()[0])
col_widths = [
[get_col_width(f) for f in formats] for formats in coldef_dict["#F"]
]
# original data format might be shorter than 80 characters and filled with spaces
row_widths = np.fromiter(map(sum, col_widths), int)
row_short = Daophot.table_width - row_widths
# fix last column widths
for w, r in zip(col_widths, row_short):
w[-1] += r
self.col_widths = col_widths
# merge the multi-line header data into single line data
coldef_dict = {k: sum(v, []) for (k, v) in coldef_dict.items()}
return coldef_dict
def update_meta(self, lines, meta):
"""
Extract table-level keywords for DAOphot table. These are indicated by
a leading '#K ' prefix.
"""
table_meta = meta["table"]
# self.lines = self.get_header_lines(lines)
Nlines = len(self.lines)
if Nlines > 0:
# Group the header lines according to their line identifiers (#K,
# #N, #U, #F or just # (spacer line)) function that grabs the line
# identifier
get_line_id = lambda s: s.split(None, 1)[0]
# Group lines by the line identifier ('#N', '#U', '#F', '#K') and
# capture line index
gid, groups = zip(*groupmore(get_line_id, self.lines, range(Nlines)))
# Groups of lines and their indices
grouped_lines, gix = zip(*groups)
# Dict of line groups keyed by line identifiers
grouped_lines_dict = dict(zip(gid, grouped_lines))
# Update the table_meta keywords if necessary
if "#K" in grouped_lines_dict:
keywords = OrderedDict(
map(self.extract_keyword_line, grouped_lines_dict["#K"])
)
table_meta["keywords"] = keywords
coldef_dict = self.parse_col_defs(grouped_lines_dict)
line_ids = ("#N", "#U", "#F")
for name, unit, fmt in zip(*map(coldef_dict.get, line_ids)):
meta["cols"][name] = {"unit": unit, "format": fmt}
self.meta = meta
self.names = coldef_dict["#N"]
def extract_keyword_line(self, line):
"""
Extract info from a header keyword line (#K).
"""
m = self.re_header_keyword.match(line)
if m:
vals = m.group("stuff").strip().rsplit(None, 2)
keyword_dict = {
"units": vals[-2],
"format": vals[-1],
"value": (vals[0] if len(vals) > 2 else ""),
}
return m.group("name"), keyword_dict
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a DAOphot
header. The DAOphot header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
Returns
-------
col : list
List of table Columns
"""
if not self.names:
raise core.InconsistentTableError("No column names found in DAOphot header")
# Create the list of io.ascii column objects
self._set_cols_from_names()
# Set unit and format as needed.
coldefs = self.meta["cols"]
for col in self.cols:
unit, fmt = map(coldefs[col.name].get, ("unit", "format"))
if unit != "##":
col.unit = unit
if fmt != "##":
col.format = fmt
# Set column start and end positions.
col_width = sum(self.col_widths, [])
ends = np.cumsum(col_width)
starts = ends - col_width
for i, col in enumerate(self.cols):
col.start, col.end = starts[i], ends[i]
col.span = col.end - col.start
if hasattr(col, "format"):
if any(x in col.format for x in "fg"):
col.type = core.FloatType
elif "d" in col.format:
col.type = core.IntType
elif "s" in col.format:
col.type = core.StrType
# INDEF is the missing value marker
self.data.fill_values.append(("INDEF", "0"))
class DaophotData(core.BaseData):
splitter_class = fixedwidth.FixedWidthSplitter
start_line = 0
comment = r"\s*#"
def __init__(self):
core.BaseData.__init__(self)
self.is_multiline = False
def get_data_lines(self, lines):
# Special case for multiline daophot databases. Extract the aperture
# values from the first multiline data block
if self.is_multiline:
# Grab the first column of the special block (aperture values) and
# recreate the aperture description string
aplist = next(zip(*map(str.split, self.first_block)))
self.header.aperture_values = tuple(map(float, aplist))
# Set self.data.data_lines to a slice of lines contain the data rows
core.BaseData.get_data_lines(self, lines)
class DaophotInputter(core.ContinuationLinesInputter):
continuation_char = "\\"
multiline_char = "*"
replace_char = " "
re_multiline = re.compile(r"(#?)[^\\*#]*(\*?)(\\*) ?$")
def search_multiline(self, lines, depth=150):
"""
Search lines for special continuation character to determine number of
continued rows in a datablock. For efficiency, depth gives the upper
limit of lines to search.
"""
# The list of apertures given in the #K APERTURES keyword may not be
# complete!! This happens if the string description of the aperture
# list is longer than the field width of the #K APERTURES field. In
# this case we have to figure out how many apertures there are based on
# the file structure.
comment, special, cont = zip(
*(self.re_multiline.search(line).groups() for line in lines[:depth])
)
# Find first non-comment line
data_start = first_false_index(comment)
# No data in lines[:depth]. This may be because there is no data in
# the file, or because the header is really huge. If the latter,
# increasing the search depth should help
if data_start is None:
return None, None, lines[:depth]
header_lines = lines[:data_start]
# Find first line ending on special row continuation character '*'
# indexed relative to data_start
first_special = first_true_index(special[data_start:depth])
if first_special is None: # no special lines
return None, None, header_lines
# last line ending on special '*', but not on line continuation '\\'
last_special = first_false_index(special[data_start + first_special : depth])
# index relative to first_special
# if first_special is None: #no end of special lines within search
# depth! increase search depth return self.search_multiline( lines,
# depth=2*depth )
# indexing now relative to line[0]
markers = np.cumsum([data_start, first_special, last_special])
# multiline portion of first data block
multiline_block = lines[markers[1] : markers[-1]]
return markers, multiline_block, header_lines
def process_lines(self, lines):
markers, block, header = self.search_multiline(lines)
self.data.is_multiline = markers is not None
self.data.markers = markers
self.data.first_block = block
# set the header lines returned by the search as an attribute of the header
self.data.header.lines = header
if markers is not None:
lines = lines[markers[0] :]
continuation_char = self.continuation_char
multiline_char = self.multiline_char
replace_char = self.replace_char
parts = []
outlines = []
for i, line in enumerate(lines):
mo = self.re_multiline.search(line)
if mo:
comment, special, cont = mo.groups()
if comment or cont:
line = line.replace(continuation_char, replace_char)
if special:
line = line.replace(multiline_char, replace_char)
if cont and not comment:
parts.append(line)
if not cont:
parts.append(line)
outlines.append("".join(parts))
parts = []
else:
raise core.InconsistentTableError(
f"multiline re could not match line {i}: {line}"
)
return outlines
class Daophot(core.BaseReader):
"""
DAOphot format table.
Example::
#K MERGERAD = INDEF scaleunit %-23.7g
#K IRAF = NOAO/IRAFV2.10EXPORT version %-23s
#K USER = davis name %-23s
#K HOST = tucana computer %-23s
#
#N ID XCENTER YCENTER MAG MERR MSKY NITER \\
#U ## pixels pixels magnitudes magnitudes counts ## \\
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
#
#N SHARPNESS CHI PIER PERROR \\
#U ## ## ## perrors \\
#F %-23.3f %-12.3f %-6d %-13s
#
14 138.538 INDEF 15.461 0.003 34.85955 4 \\
-0.032 0.802 0 No_error
The keywords defined in the #K records are available via the output table
``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/daophot.dat')
>>> data = ascii.read(filename)
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'], keyword['units'], keyword['format'])
...
MERGERAD INDEF scaleunit %-23.7g
IRAF NOAO/IRAFV2.10EXPORT version %-23s
USER name %-23s
...
The units and formats are available in the output table columns::
>>> for colname in data.colnames:
... col = data[colname]
... print(colname, col.unit, col.format)
...
ID None %-9d
XCENTER pixels %-10.3f
YCENTER pixels %-10.3f
...
Any column values of INDEF are interpreted as a missing value and will be
masked out in the resultant table.
In case of multi-aperture daophot files containing repeated entries for the last
row of fields, extra unique column names will be created by suffixing
corresponding field names with numbers starting from 2 to N (where N is the
total number of apertures).
For example,
first aperture radius will be RAPERT and corresponding magnitude will be MAG,
second aperture radius will be RAPERT2 and corresponding magnitude will be MAG2,
third aperture radius will be RAPERT3 and corresponding magnitude will be MAG3,
and so on.
"""
_format_name = "daophot"
_io_registry_format_aliases = ["daophot"]
_io_registry_can_write = False
_description = "IRAF DAOphot format table"
header_class = DaophotHeader
data_class = DaophotData
inputter_class = DaophotInputter
table_width = 80
def __init__(self):
core.BaseReader.__init__(self)
# The inputter needs to know about the data (see DaophotInputter.process_lines)
self.inputter.data = self.data
def write(self, table=None):
raise NotImplementedError
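# Illustrative sketch (not part of astropy): reading the bundled DAOphot test
# file through ascii.read() and collecting the per-column unit/format parsed
# from the #U and #F header records, as described in the docstring above.
def _example_read_daophot():
    import os
    from astropy.io import ascii
    filename = os.path.join(ascii.__path__[0], "tests/data/daophot.dat")
    data = ascii.read(filename)  # format is auto-detected as 'daophot'
    return {name: (data[name].unit, data[name].format) for name in data.colnames}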
|
5155bcddcbb0225545119ef5fd5c922dd7f28743f37e03555d239aaa6cb5fb00 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Define the Enhanced Character-Separated-Values (ECSV) which allows for reading and
writing all the meta data associated with an astropy Table object.
"""
import json
import re
import warnings
from collections import OrderedDict
import numpy as np
from astropy.io.ascii.core import convert_numpy
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core
ECSV_VERSION = "1.0"
DELIMITERS = (" ", ",")
ECSV_DATATYPES = (
"bool",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"float128",
"string",
) # Raise warning if not one of these standard dtypes
class InvalidEcsvDatatypeWarning(AstropyUserWarning):
"""
ECSV specific Astropy warning class.
"""
class EcsvHeader(basic.BasicHeader):
"""Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""Return only non-blank lines that start with the comment regexp. For these
lines strip out the matching characters and leading/trailing whitespace.
"""
re_comment = re.compile(self.comment)
for line in lines:
line = line.strip()
if not line:
continue
match = re_comment.match(line)
if match:
out = line[match.end() :]
if out:
yield out
else:
# Stop iterating on first failed match for a non-blank line
return
def write(self, lines):
"""
Write header information in the ECSV ASCII format.
This function is called at the point when preprocessing has been done to
convert the input table columns to `self.cols` which is a list of
`astropy.io.ascii.core.Column` objects. In particular `col.str_vals`
is available for each column with the string representation of each
column item for output.
This format starts with the ``%ECSV <version>`` line, then encodes the full
table meta and column attributes and meta as YAML and pretty-prints this in
the commented header. Finally the delimiter separated list of the column
names is written as the last header line, keeping the format readable by
humans and by simple csv-type readers that take the first uncommented line
as defining the column names.
"""
if self.splitter.delimiter not in DELIMITERS:
raise ValueError(
"only space and comma are allowed for delimiter in ECSV format"
)
# Now assemble the header dict that will be serialized by the YAML dumper
header = {"cols": self.cols, "schema": "astropy-2.0"}
if self.table_meta:
header["meta"] = self.table_meta
# Set the delimiter only for the non-default option(s)
if self.splitter.delimiter != " ":
header["delimiter"] = self.splitter.delimiter
header_yaml_lines = [
f"%ECSV {ECSV_VERSION}",
"---",
] + meta.get_yaml_from_header(header)
lines.extend([self.write_comment + line for line in header_yaml_lines])
lines.append(self.splitter.join([x.info.name for x in self.cols]))
def write_comments(self, lines, meta):
"""
WRITE: Override the default write_comments to do nothing since this is handled
in the custom write method.
"""
pass
def update_meta(self, lines, meta):
"""
READ: Override the default update_meta to do nothing. This process is done
in get_cols() for this reader.
"""
pass
def get_cols(self, lines):
"""
READ: Initialize the header Column objects from the table ``lines``.
Parameters
----------
lines : list
List of table lines
"""
# Cache a copy of the original input lines before processing below
raw_lines = lines
# Extract non-blank comment (header) lines with comment character stripped
lines = list(self.process_lines(lines))
# Validate that this is an ECSV file
ecsv_header_re = r"""%ECSV [ ]
(?P<major> \d+)
\. (?P<minor> \d+)
\.? (?P<bugfix> \d+)? $"""
no_header_msg = (
'ECSV header line like "# %ECSV <version>" not found as first line.'
" This is required for a ECSV file."
)
if not lines:
raise core.InconsistentTableError(no_header_msg)
match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE)
if not match:
raise core.InconsistentTableError(no_header_msg)
try:
header = meta.get_header_from_yaml(lines)
except meta.YamlParseError:
raise core.InconsistentTableError("unable to parse yaml in meta header")
if "meta" in header:
self.table_meta = header["meta"]
if "delimiter" in header:
delimiter = header["delimiter"]
if delimiter not in DELIMITERS:
raise ValueError(
"only space and comma are allowed for delimiter in ECSV format"
)
self.splitter.delimiter = delimiter
self.data.splitter.delimiter = delimiter
# Create the list of io.ascii column objects from `header`
header_cols = OrderedDict((x["name"], x) for x in header["datatype"])
self.names = [x["name"] for x in header["datatype"]]
# Read the first non-commented line of table and split to get the CSV
# header column names. This is essentially what the Basic reader does.
header_line = next(super().process_lines(raw_lines))
header_names = next(self.splitter([header_line]))
# Check for consistency of the ECSV vs. CSV header column names
if header_names != self.names:
raise core.InconsistentTableError(
f"column names from ECSV header {self.names} do not "
f"match names from header line of CSV data {header_names}"
)
# BaseHeader method to create self.cols, which is a list of
# io.ascii.core.Column objects (*not* Table Column objects).
self._set_cols_from_names()
# Transfer attributes from the column descriptor stored in the input
# header YAML metadata to the new columns to create this table.
for col in self.cols:
for attr in ("description", "format", "unit", "meta", "subtype"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
col.dtype = header_cols[col.name]["datatype"]
# Warn if col dtype is not a valid ECSV datatype, but allow reading for
# back-compatibility with existing older files that have numpy datatypes
# like datetime64 or object or python str, which are not in the ECSV standard.
if col.dtype not in ECSV_DATATYPES:
msg = (
f"unexpected datatype {col.dtype!r} of column {col.name!r} "
f"is not in allowed ECSV datatypes {ECSV_DATATYPES}. "
"Using anyway as a numpy dtype but beware since unexpected "
"results are possible."
)
warnings.warn(msg, category=InvalidEcsvDatatypeWarning)
# Subtype is written like "int64[2,null]" and we want to split this
# out to "int64" and [2, None].
subtype = col.subtype
if subtype and "[" in subtype:
idx = subtype.index("[")
col.subtype = subtype[:idx]
col.shape = json.loads(subtype[idx:])
# Convert ECSV "string" to numpy "str"
for attr in ("dtype", "subtype"):
if getattr(col, attr) == "string":
setattr(col, attr, "str")
# ECSV subtype of 'json' maps to numpy 'object' dtype
if col.subtype == "json":
col.subtype = "object"
def _check_dtype_is_str(col):
if col.dtype != "str":
raise ValueError(f'datatype of column {col.name!r} must be "string"')
class EcsvOutputter(core.TableOutputter):
"""
After reading the input lines and processing, convert the Reader columns
and metadata to an astropy.table.Table object. This overrides the default
converters to be an empty list because there is no "guessing" of the
conversion function.
"""
default_converters = []
def __call__(self, cols, meta):
# Convert to a Table with all plain Column subclass columns
out = super().__call__(cols, meta)
# If mixin columns exist (based on the special '__mixin_columns__'
# key in the table ``meta``), then use that information to construct
# appropriate mixin columns and remove the original data columns.
# If no __mixin_columns__ exists then this function just passes back
# the input table.
out = serialize._construct_mixins_from_columns(out)
return out
def _convert_vals(self, cols):
"""READ: Convert str_vals in `cols` to final arrays with correct dtypes.
This is adapted from ``BaseOutputter._convert_vals``. In the case of ECSV
there is no guessing and all types are known in advance. A big change
is handling the possibility of JSON-encoded values, both unstructured
object data and structured values that may contain masked data.
"""
for col in cols:
try:
# 1-d or N-d object columns are serialized as JSON.
if col.subtype == "object":
_check_dtype_is_str(col)
col_vals = [json.loads(val) for val in col.str_vals]
col.data = np.empty([len(col_vals)] + col.shape, dtype=object)
col.data[...] = col_vals
# Variable length arrays with shape (n, m, ..., *) for fixed
# n, m, .. and variable in last axis. Masked values here are
# not currently supported.
elif col.shape and col.shape[-1] is None:
_check_dtype_is_str(col)
# Empty (blank) values in original ECSV are changed to "0"
# in str_vals with corresponding col.mask being created and
# set accordingly. Instead use an empty list here.
if hasattr(col, "mask"):
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = "[]"
# Remake as a 1-d object column of numpy ndarrays or
# MaskedArray using the datatype specified in the ECSV file.
col_vals = []
for str_val in col.str_vals:
obj_val = json.loads(str_val) # list or nested lists
try:
arr_val = np.array(obj_val, dtype=col.subtype)
except TypeError:
# obj_val has entries that are inconsistent with
# dtype. For a valid ECSV file the only possibility
# is None values (indicating missing values).
data = np.array(obj_val, dtype=object)
# Replace all the None with an appropriate fill value
mask = data == None
kind = np.dtype(col.subtype).kind
data[mask] = {"U": "", "S": b""}.get(kind, 0)
arr_val = np.ma.array(data.astype(col.subtype), mask=mask)
col_vals.append(arr_val)
col.shape = ()
col.dtype = np.dtype(object)
# np.array(col_vals_arr, dtype=object) fails ?? so this workaround:
col.data = np.empty(len(col_vals), dtype=object)
col.data[:] = col_vals
# Multidim columns with consistent shape (n, m, ...). These
# might be masked.
elif col.shape:
_check_dtype_is_str(col)
# Change empty (blank) values in original ECSV to something
# like "[[null, null],[null,null]]" so subsequent JSON
# decoding works. Delete `col.mask` so that later code in
# core TableOutputter.__call__() that deals with col.mask
# does not run (since handling is done here already).
if hasattr(col, "mask"):
all_none_arr = np.full(
shape=col.shape, fill_value=None, dtype=object
)
all_none_json = json.dumps(all_none_arr.tolist())
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = all_none_json
del col.mask
col_vals = [json.loads(val) for val in col.str_vals]
# Make a numpy object array of col_vals to look for None
# (masked values)
data = np.array(col_vals, dtype=object)
mask = data == None
if not np.any(mask):
# No None's, just convert to required dtype
col.data = data.astype(col.subtype)
else:
# Replace all the None with an appropriate fill value
kind = np.dtype(col.subtype).kind
data[mask] = {"U": "", "S": b""}.get(kind, 0)
# Finally make a MaskedArray with the filled data + mask
col.data = np.ma.array(data.astype(col.subtype), mask=mask)
# Regular scalar value column
else:
if col.subtype:
warnings.warn(
f"unexpected subtype {col.subtype!r} set for column "
f"{col.name!r}, using dtype={col.dtype!r} instead.",
category=InvalidEcsvDatatypeWarning,
)
converter_func, _ = convert_numpy(col.dtype)
col.data = converter_func(col.str_vals)
if col.data.shape[1:] != tuple(col.shape):
raise ValueError(
"shape mismatch between value and column specifier"
)
except json.JSONDecodeError:
raise ValueError(
f"column {col.name!r} failed to convert: "
"column value is not valid JSON"
)
except Exception as exc:
raise ValueError(f"column {col.name!r} failed to convert: {exc}")
class EcsvData(basic.BasicData):
def _set_fill_values(self, cols):
"""READ: Set the fill values of the individual cols based on fill_values of BaseData.
For ECSV handle the corner case of data that has been serialized using
the serialize_method='data_mask' option, which writes the full data and
mask directly, AND where that table includes a string column with zero-length
string entries ("") which are valid data.
Normally the super() method will set col.fill_value=('', '0') to replace
blanks with a '0'. But for that corner case subset, instead do not do
any filling.
"""
super()._set_fill_values(cols)
# Get the serialized columns spec. It might not exist and there might
# not even be any table meta, so punt in those cases.
try:
scs = self.header.table_meta["__serialized_columns__"]
except (AttributeError, KeyError):
return
# Got some serialized columns, so check for string type and serialized
# as a MaskedColumn. Without 'data_mask', MaskedColumn objects are
# stored to ECSV as normal columns.
for col in cols:
if (
col.dtype == "str"
and col.name in scs
and scs[col.name]["__class__"] == "astropy.table.column.MaskedColumn"
):
col.fill_values = {} # No data value replacement
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings.
This version considerably simplifies the base method:
- No need to set fill values and column formats
- No per-item formatting, just use repr()
- Use JSON for object-type or multidim values
- Only Column or MaskedColumn can end up as cols here.
- Only replace masked values with "", not the generalized filling
"""
for col in self.cols:
if len(col.shape) > 1 or col.info.dtype.kind == "O":
def format_col_item(idx):
obj = col[idx]
try:
obj = obj.tolist()
except AttributeError:
pass
return json.dumps(obj, separators=(",", ":"))
else:
def format_col_item(idx):
return str(col[idx])
try:
col.str_vals = [format_col_item(idx) for idx in range(len(col))]
except TypeError as exc:
raise TypeError(
f"could not convert column {col.info.name!r} to string: {exc}"
) from exc
# Replace every masked value in a 1-d column with an empty string.
# For multi-dim columns this gets done by JSON via "null".
if hasattr(col, "mask") and col.ndim == 1:
for idx in col.mask.nonzero()[0]:
col.str_vals[idx] = ""
out = [col.str_vals for col in self.cols]
return out
class Ecsv(basic.Basic):
"""ECSV (Enhanced Character Separated Values) format table.
The ECSV format allows for specification of key table and column meta-data, in
particular the data type and unit.
See: https://github.com/astropy/astropy-APEs/blob/main/APE6.rst
Examples
--------
>>> from astropy.table import Table
>>> ecsv_content = '''# %ECSV 0.9
... # ---
... # datatype:
... # - {name: a, unit: m / s, datatype: int64, format: '%03d'}
... # - {name: b, unit: km, datatype: int64, description: This is column b}
... a b
... 001 2
... 004 3
... '''
>>> Table.read(ecsv_content, format='ascii.ecsv')
<Table length=2>
a b
m / s km
int64 int64
----- -----
001 2
004 3
"""
_format_name = "ecsv"
_description = "Enhanced CSV"
_io_registry_suffix = ".ecsv"
header_class = EcsvHeader
data_class = EcsvData
outputter_class = EcsvOutputter
max_ndim = None # No limit on column dimensionality
def update_table_data(self, table):
"""
Update table columns in place if mixin columns are present.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
with serialize_context_as("ecsv"):
out = serialize.represent_mixins_as_columns(table)
return out
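# Illustrative sketch (not astropy API, names are hypothetical): writing a small
# table as ECSV with a comma delimiter. Only space and comma are accepted (see
# DELIMITERS above) and a non-default delimiter is recorded in the YAML header.
def _example_write_ecsv_with_comma():
    import io
    import astropy.units as u
    from astropy.table import Table
    t = Table({"a": [1, 2] * u.m}, meta={"origin": "example"})
    buf = io.StringIO()
    t.write(buf, format="ascii.ecsv", delimiter=",")
    return buf.getvalue()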
|
ee10d80c4e39f03437e0f3526f9abc1d5147129e3cb33795874854ddadaf2c2b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing HDF5 tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
HDF5_SIGNATURE = b"\x89HDF\r\n\x1a\n"
META_KEY = "__table_column_meta__"
__all__ = ["read_table_hdf5", "write_table_hdf5"]
def meta_path(path):
return path + "." + META_KEY
def _find_all_structured_arrays(handle):
"""
Find all structured arrays in an HDF5 file.
"""
import h5py
structured_arrays = []
def append_structured_arrays(name, obj):
if isinstance(obj, h5py.Dataset) and obj.dtype.kind == "V":
structured_arrays.append(name)
handle.visititems(append_structured_arrays)
return structured_arrays
def is_hdf5(origin, filepath, fileobj, *args, **kwargs):
if fileobj is not None:
loc = fileobj.tell()
try:
signature = fileobj.read(8)
finally:
fileobj.seek(loc)
return signature == HDF5_SIGNATURE
elif filepath is not None:
return filepath.endswith((".hdf5", ".h5"))
try:
import h5py
except ImportError:
return False
else:
return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset))
def read_table_hdf5(input, path=None, character_as_bytes=True):
"""
Read a Table object from an HDF5 file.
This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one
table is present in the HDF5 file or group, the first table is read in and
a warning is displayed.
Parameters
----------
input : str or :class:`h5py.File` or :class:`h5py.Group` or :class:`h5py.Dataset`
If a string, the filename to read the table from.
If an h5py object, either the file or the group object to read the
table from.
path : str
The path from which to read the table inside the HDF5 file.
This should be relative to the input file or group.
character_as_bytes : bool
If `True` then Table columns are left as bytes.
If `False` then Table columns are converted to unicode.
"""
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
# This function is iterative, and only gets to writing the file when
# the input is an hdf5 Group. Moreover, the input variable is changed in
# place.
# Here, we save its value to be used at the end when the conditions are
# right.
input_save = input
if isinstance(input, (h5py.File, h5py.Group)):
# If a path was specified, follow the path
if path is not None:
try:
input = input[path]
except (KeyError, ValueError):
raise OSError(f"Path {path} does not exist")
# `input` is now either a group or a dataset. If it is a group, we
# will search for all structured arrays inside the group, and if there
# is one we can proceed otherwise an error is raised. If it is a
# dataset, we just proceed with the reading.
if isinstance(input, h5py.Group):
# Find all structured arrays in group
arrays = _find_all_structured_arrays(input)
if len(arrays) == 0:
raise ValueError(f"no table found in HDF5 group {path}")
elif len(arrays) > 0:
path = arrays[0] if path is None else path + "/" + arrays[0]
if len(arrays) > 1:
warnings.warn(
"path= was not specified but multiple tables"
" are present, reading in first available"
f" table (path={path})",
AstropyUserWarning,
)
return read_table_hdf5(input, path=path)
elif not isinstance(input, h5py.Dataset):
# If a file object was passed, then we need to extract the filename
# because h5py cannot properly read in file objects.
if hasattr(input, "read"):
try:
input = input.name
except AttributeError:
raise TypeError("h5py can only open regular files")
# Open the file for reading, and recursively call read_table_hdf5 with
# the file object and the path.
f = h5py.File(input, "r")
try:
return read_table_hdf5(f, path=path, character_as_bytes=character_as_bytes)
finally:
f.close()
# If we are here, `input` should be a Dataset object, which we can now
# convert to a Table.
# Create a Table object
from astropy.table import Table, meta, serialize
table = Table(np.array(input))
# Read the meta-data from the file. For back-compatibility, we can read
# the old file format where the serialized metadata were saved in the
# attributes of the HDF5 dataset.
# In the new format, instead, metadata are stored in a new dataset in the
# same file. This is introduced in Astropy 3.0
old_version_meta = META_KEY in input.attrs
new_version_meta = path is not None and meta_path(path) in input_save
if old_version_meta or new_version_meta:
if new_version_meta:
header = meta.get_header_from_yaml(
h.decode("utf-8") for h in input_save[meta_path(path)]
)
else:
# old_version_meta must be True here: given (new or old) and not new, old follows.
header = meta.get_header_from_yaml(
h.decode("utf-8") for h in input.attrs[META_KEY]
)
if "meta" in list(header.keys()):
table.meta = header["meta"]
header_cols = {x["name"]: x for x in header["datatype"]}
for col in table.columns.values():
for attr in ("description", "format", "unit", "meta"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
table = serialize._construct_mixins_from_columns(table)
else:
# Read the meta-data from the file
table.meta.update(input.attrs)
if not character_as_bytes:
table.convert_bytestring_to_unicode()
return table
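# Illustrative sketch (not part of astropy; file name and path are hypothetical):
# reading an HDF5 table through the unified Table.read() interface, which
# dispatches to read_table_hdf5 above.
def _example_read_hdf5():
    from astropy.table import Table
    return Table.read(
        "observations.hdf5", format="hdf5", path="group/data", character_as_bytes=False
    )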
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.table import serialize
from astropy.utils.data_info import serialize_context_as
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table.
with serialize_context_as("hdf5"):
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
def write_table_hdf5(
table,
output,
path=None,
compression=False,
append=False,
overwrite=False,
serialize_meta=False,
**create_dataset_kwargs,
):
"""
Write a Table object to an HDF5 file.
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py.File` or :class:`h5py.Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to ``__astropy_table__``.
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
serialize_meta : bool
Whether to serialize rich table meta-data when writing the HDF5 file, in
particular such data required to write and read back mixin columns like
``Time``, ``SkyCoord``, or ``Quantity`` to the file.
**create_dataset_kwargs
Additional keyword arguments are passed to
``h5py.File.create_dataset()`` or ``h5py.Group.create_dataset()``.
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
# "__astropy_table__" is just an arbitrary, hardcoded default path.
path = "__astropy_table__"
elif path.endswith("/"):
raise ValueError("table path should end with table name, not /")
if "/" in path:
group, name = path.rsplit("/", 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if len(list(output.keys())) > 0 and name == "__astropy_table__":
raise ValueError(
"table path should always be set via the "
"path= argument when writing to existing "
"files"
)
elif name == "__astropy_table__":
warnings.warn(
"table path was not set via the path= argument; "
f"using default path {path}"
)
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
if overwrite and not append:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Open the file for appending or writing
f = h5py.File(output, "a" if append else "w")
# Recursively call the write function
try:
return write_table_hdf5(
table,
f,
path=path,
compression=compression,
append=append,
overwrite=overwrite,
serialize_meta=serialize_meta,
)
finally:
f.close()
else:
raise TypeError("output should be a string or an h5py File or Group object")
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
if serialize_meta and name + ".__table_column_meta__" in output_group:
del output_group[name + ".__table_column_meta__"]
else:
raise OSError(f"Table {path} already exists")
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# Table with numpy unicode strings can't be written in HDF5 so
# to write such a table a copy of table is made containing columns as
# bytestrings. Now this copy of the table can be written in HDF5.
if any(col.info.dtype.kind == "U" for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ("unit", "format", "description", "meta"):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn(
"table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning,
)
# Write the table to the file
if compression:
if compression is True:
compression = "gzip"
dset = output_group.create_dataset(
name,
data=table.as_array(),
compression=compression,
**create_dataset_kwargs,
)
else:
dset = output_group.create_dataset(
name, data=table.as_array(), **create_dataset_kwargs
)
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = np.array([h.encode("utf-8") for h in header_yaml])
output_group.create_dataset(meta_path(name), data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn(
f"Attribute `{key}` of type {type(val)} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)",
AstropyUserWarning,
)
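# Illustrative sketch (not part of astropy; file name and path are hypothetical):
# writing a table with unit metadata to HDF5. serialize_meta=True stores the
# YAML column metadata in a side dataset so the unit survives a round trip.
def _example_write_hdf5():
    import astropy.units as u
    from astropy.table import Table
    t = Table({"flux": [1.0, 2.0] * u.Jy})
    t.write(
        "observations.hdf5", format="hdf5", path="group/data",
        serialize_meta=True, overwrite=True,
    )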
def register_hdf5():
"""
Register HDF5 with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader("hdf5", Table, read_table_hdf5)
io_registry.register_writer("hdf5", Table, write_table_hdf5)
io_registry.register_identifier("hdf5", Table, is_hdf5)
|
6bfc3674c43b96516f353029896e79bc84a763af779a20d31b652b34e7cd96c5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for serializing astropy objects to YAML.
It provides functions `~astropy.io.misc.yaml.dump`,
`~astropy.io.misc.yaml.load`, and `~astropy.io.misc.yaml.load_all` which
call the corresponding functions in `PyYaml <https://pyyaml.org>`_ but use the
`~astropy.io.misc.yaml.AstropyDumper` and `~astropy.io.misc.yaml.AstropyLoader`
classes to define custom YAML tags for the following astropy classes:
- `astropy.units.Unit`
- `astropy.units.Quantity`
- `astropy.time.Time`
- `astropy.time.TimeDelta`
- `astropy.coordinates.SkyCoord`
- `astropy.coordinates.Angle`
- `astropy.coordinates.Latitude`
- `astropy.coordinates.Longitude`
- `astropy.coordinates.EarthLocation`
- `astropy.table.SerializedColumn`
Example
=======
::
>>> from astropy.io.misc import yaml
>>> import astropy.units as u
>>> from astropy.time import Time
>>> from astropy.coordinates import EarthLocation
>>> t = Time(2457389.0, format='mjd',
... location=EarthLocation(1000, 2000, 3000, unit=u.km))
>>> td = yaml.dump(t)
>>> print(td)
!astropy.time.Time
format: mjd
in_subfmt: '*'
jd1: 4857390.0
jd2: -0.5
location: !astropy.coordinates.earth.EarthLocation
ellipsoid: WGS84
x: !astropy.units.Quantity
unit: &id001 !astropy.units.Unit {unit: km}
value: 1000.0
y: !astropy.units.Quantity
unit: *id001
value: 2000.0
z: !astropy.units.Quantity
unit: *id001
value: 3000.0
out_subfmt: '*'
precision: 3
scale: utc
>>> ty = yaml.load(td)
>>> ty
<Time object: scale='utc' format='mjd' value=2457389.0>
>>> ty.location # doctest: +FLOAT_CMP
<EarthLocation (1000., 2000., 3000.) km>
"""
import base64
import numpy as np
import yaml
from astropy import coordinates as coords
from astropy import units as u
from astropy.table import SerializedColumn
from astropy.time import Time, TimeDelta
__all__ = ["AstropyLoader", "AstropyDumper", "load", "load_all", "dump"]
def _unit_representer(dumper, obj):
out = {"unit": str(obj.to_string())}
return dumper.represent_mapping("!astropy.units.Unit", out)
def _unit_constructor(loader, node):
map = loader.construct_mapping(node)
return u.Unit(map["unit"], parse_strict="warn")
def _serialized_column_representer(dumper, obj):
out = dumper.represent_mapping("!astropy.table.SerializedColumn", obj)
return out
def _serialized_column_constructor(loader, node):
map = loader.construct_mapping(node)
return SerializedColumn(map)
def _time_representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping("!astropy.time.Time", out)
def _time_constructor(loader, node):
map = loader.construct_mapping(node)
out = Time.info._construct_from_dict(map)
return out
def _timedelta_representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping("!astropy.time.TimeDelta", out)
def _timedelta_constructor(loader, node):
map = loader.construct_mapping(node)
out = TimeDelta.info._construct_from_dict(map)
return out
def _ndarray_representer(dumper, obj):
if not (obj.flags["C_CONTIGUOUS"] or obj.flags["F_CONTIGUOUS"]):
obj = np.ascontiguousarray(obj)
if np.isfortran(obj):
obj = obj.T
order = "F"
else:
order = "C"
data_b64 = base64.b64encode(obj.tobytes())
out = dict(
buffer=data_b64,
dtype=str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr,
shape=obj.shape,
order=order,
)
return dumper.represent_mapping("!numpy.ndarray", out)
def _ndarray_constructor(loader, node):
# Convert mapping to a dict useful for initializing ndarray.
# Need deep=True since for structured dtype, the contents
# include lists and tuples, which need recursion via
# construct_sequence.
map = loader.construct_mapping(node, deep=True)
map["buffer"] = base64.b64decode(map["buffer"])
return np.ndarray(**map)
def _void_representer(dumper, obj):
data_b64 = base64.b64encode(obj.tobytes())
out = dict(
buffer=data_b64,
dtype=str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr,
)
return dumper.represent_mapping("!numpy.void", out)
def _void_constructor(loader, node):
# Interpret the node as an array scalar and then index to change to void.
map = loader.construct_mapping(node, deep=True)
map["buffer"] = base64.b64decode(map["buffer"])
return np.ndarray(shape=(), **map)[()]
def _quantity_representer(tag):
def representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping(tag, out)
return representer
def _quantity_constructor(cls):
def constructor(loader, node):
map = loader.construct_mapping(node)
return cls.info._construct_from_dict(map)
return constructor
def _skycoord_representer(dumper, obj):
map = obj.info._represent_as_dict()
out = dumper.represent_mapping("!astropy.coordinates.sky_coordinate.SkyCoord", map)
return out
def _skycoord_constructor(loader, node):
map = loader.construct_mapping(node)
out = coords.SkyCoord.info._construct_from_dict(map)
return out
# Straight from yaml's Representer
def _complex_representer(self, data):
if data.imag == 0.0:
data = f"{data.real!r}"
elif data.real == 0.0:
data = f"{data.imag!r}j"
elif data.imag > 0:
data = f"{data.real!r}+{data.imag!r}j"
else:
data = f"{data.real!r}{data.imag!r}j"
return self.represent_scalar("tag:yaml.org,2002:python/complex", data)
def _complex_constructor(loader, node):
map = loader.construct_scalar(node)
return complex(map)
class AstropyLoader(yaml.SafeLoader):
"""
Custom SafeLoader that constructs astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available constructor functions that are
called when parsing a YAML stream. See the `PyYaml documentation
<https://pyyaml.org/wiki/PyYAMLDocumentation>`_ for details of the
class signature.
"""
def _construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def _construct_python_unicode(self, node):
return self.construct_scalar(node)
class AstropyDumper(yaml.SafeDumper):
"""
Custom SafeDumper that represents astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available representer functions that are
called when generating a YAML stream from an object. See the
`PyYaml documentation <https://pyyaml.org/wiki/PyYAMLDocumentation>`_
for details of the class signature.
"""
def _represent_tuple(self, data):
return self.represent_sequence("tag:yaml.org,2002:python/tuple", data)
AstropyDumper.add_multi_representer(u.UnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.FunctionUnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.StructuredUnit, _unit_representer)
AstropyDumper.add_representer(tuple, AstropyDumper._represent_tuple)
AstropyDumper.add_representer(np.ndarray, _ndarray_representer)
AstropyDumper.add_representer(np.void, _void_representer)
AstropyDumper.add_representer(Time, _time_representer)
AstropyDumper.add_representer(TimeDelta, _timedelta_representer)
AstropyDumper.add_representer(coords.SkyCoord, _skycoord_representer)
AstropyDumper.add_representer(SerializedColumn, _serialized_column_representer)
# Numpy dtypes
AstropyDumper.add_representer(np.bool_, yaml.representer.SafeRepresenter.represent_bool)
for np_type in [
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]:
AstropyDumper.add_representer(
np_type, yaml.representer.SafeRepresenter.represent_int
)
for np_type in [np.float_, np.float16, np.float32, np.float64, np.longdouble]:
AstropyDumper.add_representer(
np_type, yaml.representer.SafeRepresenter.represent_float
)
for np_type in [np.complex_, complex, np.complex64, np.complex128]:
AstropyDumper.add_representer(np_type, _complex_representer)
AstropyLoader.add_constructor("tag:yaml.org,2002:python/complex", _complex_constructor)
AstropyLoader.add_constructor(
"tag:yaml.org,2002:python/tuple", AstropyLoader._construct_python_tuple
)
AstropyLoader.add_constructor(
"tag:yaml.org,2002:python/unicode", AstropyLoader._construct_python_unicode
)
AstropyLoader.add_constructor("!astropy.units.Unit", _unit_constructor)
AstropyLoader.add_constructor("!numpy.ndarray", _ndarray_constructor)
AstropyLoader.add_constructor("!numpy.void", _void_constructor)
AstropyLoader.add_constructor("!astropy.time.Time", _time_constructor)
AstropyLoader.add_constructor("!astropy.time.TimeDelta", _timedelta_constructor)
AstropyLoader.add_constructor(
"!astropy.coordinates.sky_coordinate.SkyCoord", _skycoord_constructor
)
AstropyLoader.add_constructor(
"!astropy.table.SerializedColumn", _serialized_column_constructor
)
for cls, tag in (
(u.Quantity, "!astropy.units.Quantity"),
(u.Magnitude, "!astropy.units.Magnitude"),
(u.Dex, "!astropy.units.Dex"),
(u.Decibel, "!astropy.units.Decibel"),
(coords.Angle, "!astropy.coordinates.Angle"),
(coords.Latitude, "!astropy.coordinates.Latitude"),
(coords.Longitude, "!astropy.coordinates.Longitude"),
(coords.EarthLocation, "!astropy.coordinates.earth.EarthLocation"),
):
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
for cls in list(coords.representation.REPRESENTATION_CLASSES.values()) + list(
coords.representation.DIFFERENTIAL_CLASSES.values()
):
name = cls.__name__
# Add representations/differentials defined in astropy.
if name in coords.representation.__all__:
tag = "!astropy.coordinates." + name
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
def load(stream):
"""Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load(stream, Loader=AstropyLoader)
def load_all(stream):
"""Parse the all YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
objs : generator
Generator of objects corresponding to the YAML documents
"""
return yaml.load_all(stream, Loader=AstropyLoader)
def dump(data, stream=None, **kwargs):
"""Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data : object
Object to serialize to YAML
stream : file-like, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str
"""
kwargs["Dumper"] = AstropyDumper
kwargs.setdefault("default_flow_style", None)
return yaml.dump(data, stream=stream, **kwargs)
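# Illustrative sketch (not part of astropy): round-tripping a Quantity and a
# plain numpy array through the dump()/load() functions defined above. The
# Quantity is emitted with an !astropy.units.Quantity tag and the array as a
# !numpy.ndarray mapping holding a base64-encoded buffer.
def _example_yaml_roundtrip():
    import numpy as np
    from astropy import units as u
    text = dump({"speed": 3.0 * u.km / u.s, "arr": np.arange(3)})
    return load(text)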
|
8ea8951a245b12f62413ad9020e64c3eb1d2a0ae66d68af0bc5a848ef3abacd5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple input/output related functionality that is not
part of a larger framework or standard.
"""
import pickle
__all__ = ["fnpickle", "fnunpickle"]
def fnunpickle(fileorname, number=0):
"""Unpickle pickled objects from a specified file and return the contents.
Parameters
----------
fileorname : str or file-like
The file name or file from which to unpickle objects. If a file object,
it should have been opened in binary mode.
number : int
If 0, a single object will be returned (the first in the file). If >0,
this specifies the number of objects to be unpickled, and a list will
be returned with exactly that many objects. If <0, all objects in the
file will be unpickled and returned as a list.
Raises
------
EOFError
If ``number`` is >0 and there are fewer than ``number`` objects in the
pickled file.
Returns
-------
contents : object or list
If ``number`` is 0, this is an individual object - the first one
unpickled from the file. Otherwise, it is a list of objects unpickled
from the file.
"""
if isinstance(fileorname, str):
f = open(fileorname, "rb")
close = True
else:
f = fileorname
close = False
try:
if number > 0: # get that number
res = []
for i in range(number):
res.append(pickle.load(f))
elif number < 0: # get all objects
res = []
eof = False
while not eof:
try:
res.append(pickle.load(f))
except EOFError:
eof = True
else: # number==0
res = pickle.load(f)
finally:
if close:
f.close()
return res
def fnpickle(object, fileorname, protocol=None, append=False):
"""Pickle an object to a specified file.
Parameters
----------
object
The python object to pickle.
fileorname : str or file-like
The filename or file into which the `object` should be pickled. If a
file object, it should have been opened in binary mode.
protocol : int or None
Pickle protocol to use - see the :mod:`pickle` module for details on
these options. If None, the most recent protocol will be used.
append : bool
If True, the object is appended to the end of the file, otherwise the
file will be overwritten (if a file object is given instead of a
file name, this has no effect).
"""
if protocol is None:
protocol = pickle.HIGHEST_PROTOCOL
if isinstance(fileorname, str):
f = open(fileorname, "ab" if append else "wb")
close = True
else:
f = fileorname
close = False
try:
pickle.dump(object, f, protocol=protocol)
finally:
if close:
f.close()
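# Illustrative sketch (not part of astropy; the file name is hypothetical):
# pickling two objects into the same file with append=True and reading them
# all back with number=-1.
def _example_pickle_roundtrip():
    fnpickle({"a": 1}, "objects.pkl")
    fnpickle([1, 2, 3], "objects.pkl", append=True)
    return fnunpickle("objects.pkl", number=-1)  # -> [{'a': 1}, [1, 2, 3]]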
|
9291825dcec3a872ff2a645ba7e88e3cb95f3fe28150476804d14fd0dd8e7f25 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing Parquet
tables that are not meant to be used directly, but instead are
available as readers/writers in `astropy.table`. See
:ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
from astropy.utils import minversion
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
PARQUET_SIGNATURE = b"PAR1"
__all__ = [] # nothing is publicly scoped
def parquet_identify(origin, filepath, fileobj, *args, **kwargs):
"""Checks if input is in the Parquet format.
Parameters
----------
origin : Any
filepath : str or None
fileobj : `~pyarrow.NativeFile` or None
*args, **kwargs
Returns
-------
is_parquet : bool
True if 'fileobj' is not None and is a pyarrow file, or if
'filepath' is a string ending with '.parquet' or '.parq'.
False otherwise.
"""
if fileobj is not None:
try: # safely test if pyarrow file
pos = fileobj.tell() # store current stream position
except AttributeError:
return False
signature = fileobj.read(4) # read first 4 bytes
fileobj.seek(pos) # return to original location
return signature == PARQUET_SIGNATURE
elif filepath is not None:
return filepath.endswith((".parquet", ".parq"))
else:
return False
def read_table_parquet(
input, include_names=None, exclude_names=None, schema_only=False, filters=None
):
"""
Read a Table object from a Parquet file.
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
The ``filters`` parameter consists of predicates that are expressed
in disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
DNF allows arbitrary boolean logical combinations of single column
predicates. The innermost tuples each describe a single column predicate.
The list of inner predicates is interpreted as a conjunction (AND),
forming a more selective and multiple column predicate. Finally, the most
outer list combines these filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
Examples:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
Parameters
----------
input : str or path-like or file-like object
If a string or path-like object, the filename to read the table from.
If a file-like object, the stream to read data.
include_names : list [str], optional
List of names to include in output. If not supplied, then
include all columns.
exclude_names : list [str], optional
List of names to exclude from output (applied after ``include_names``).
If not supplied then no columns are excluded.
schema_only : bool, optional
Only read the schema/metadata with table information.
filters : list [tuple] or list [list [tuple] ] or None, optional
Rows which do not match the filter predicate will be removed from
scanned data. See `pyarrow.parquet.read_table()` for details.
Returns
-------
table : `~astropy.table.Table`
Table will have zero rows and only metadata information
if schema_only is True.
"""
pa, parquet, _ = get_pyarrow()
if not isinstance(input, (str, os.PathLike)):
# The 'read' attribute is the key component of a generic
# file-like object.
if not hasattr(input, "read"):
raise TypeError("pyarrow can only open path-like or file-like objects.")
schema = parquet.read_schema(input)
# Pyarrow stores all metadata as byte-strings, so we convert
# to UTF-8 strings here.
if schema.metadata is not None:
md = {k.decode("UTF-8"): v.decode("UTF-8") for k, v in schema.metadata.items()}
else:
md = {}
from astropy.table import Column, Table, meta, serialize
# parse metadata from table yaml
meta_dict = {}
if "table_meta_yaml" in md:
meta_yaml = md.pop("table_meta_yaml").split("\n")
meta_hdr = meta.get_header_from_yaml(meta_yaml)
if "meta" in meta_hdr:
meta_dict = meta_hdr["meta"]
else:
meta_hdr = None
# parse and set serialized columns
full_table_columns = {name: name for name in schema.names}
has_serialized_columns = False
if "__serialized_columns__" in meta_dict:
has_serialized_columns = True
serialized_columns = meta_dict["__serialized_columns__"]
for scol in serialized_columns:
for name in _get_names(serialized_columns[scol]):
full_table_columns[name] = scol
use_names = set(full_table_columns.values())
# Apply include_names before exclude_names
if include_names is not None:
use_names.intersection_update(include_names)
if exclude_names is not None:
use_names.difference_update(exclude_names)
# Preserve column ordering via list, and use this dict trick
# to remove duplicates and preserve ordering (for mixin columns)
use_names = list(
dict.fromkeys([x for x in full_table_columns.values() if x in use_names])
)
# names_to_read is a list of actual serialized column names, where
# e.g. the requested name 'time' becomes ['time.jd1', 'time.jd2']
names_to_read = []
for name in use_names:
names = [n for n, col in full_table_columns.items() if name == col]
names_to_read.extend(names)
if not names_to_read:
raise ValueError("No include_names specified were found in the table.")
# We need to pop any unread serialized columns out of the meta_dict.
if has_serialized_columns:
for scol in list(meta_dict["__serialized_columns__"].keys()):
if scol not in use_names:
meta_dict["__serialized_columns__"].pop(scol)
# whether to return the whole table or a formatted empty table.
if not schema_only:
# Read the pyarrow table, specifying columns and filters.
pa_table = parquet.read_table(input, columns=names_to_read, filters=filters)
num_rows = pa_table.num_rows
else:
num_rows = 0
# Determine numpy/astropy types of columns from the arrow table.
dtype = []
for name in names_to_read:
t = schema.field(name).type
shape = None
if isinstance(t, pa.FixedSizeListType):
# The FixedSizeListType has an arrow value_type and a size.
value_type = t.value_type
shape = (t.list_size,)
elif isinstance(t, pa.ListType):
# The ListType (variable length arrays) has a value type.
value_type = t.value_type
else:
# All other arrow column types are the value_type.
value_type = t
if value_type not in (pa.string(), pa.binary()):
# Convert the pyarrow value type into a numpy dtype (which is returned
# by the to_pandas_type() method).
# If this is an array column, the numpy dtype needs the shape as well.
if shape is None:
dtype.append(value_type.to_pandas_dtype())
else:
dtype.append((value_type.to_pandas_dtype(), shape))
continue
# Special-case for string and binary columns
md_name = f"table::len::{name}"
if md_name in md:
# String/bytes length from header.
strlen = int(md[md_name])
elif schema_only:
# Choose an arbitrary string length since we
# are not reading in the table.
strlen = 10
warnings.warn(
f"No {md_name} found in metadata. Guessing {strlen} for schema.",
AstropyUserWarning,
)
else:
# Find the maximum string length by scanning the table data.
strlen = max(len(row.as_py()) for row in pa_table[name])
warnings.warn(
f"No {md_name} found in metadata. Using longest string"
f" ({strlen} characters).",
AstropyUserWarning,
)
strname = f"U{strlen}" if value_type == pa.string() else f"|S{strlen}"
# If this is an array column, the numpy dtype needs the shape as well.
if shape is None:
dtype.append(strname)
else:
dtype.append((strname, shape))
if schema_only:
# If we only need the schema, create an empty table with the correct dtype.
data = np.zeros(0, dtype=list(zip(names_to_read, dtype)))
table = Table(data=data, meta=meta_dict)
else:
# If we need the full table, create the table and add the columns
# one at a time. This minimizes data copying.
table = Table(meta=meta_dict)
for name, dt in zip(names_to_read, dtype):
# First convert the arrow column to a numpy array.
col = pa_table[name].to_numpy()
t = schema.field(name).type
if t in (pa.string(), pa.binary()):
# If it is a string/binary type, coerce it to the correct type.
col = col.astype(dt)
elif isinstance(t, pa.FixedSizeListType):
# If it is a FixedSizeListType (array column) then it needs to
# be broken into a 2D array, but only if the table has a non-zero
# length.
if len(col) > 0:
col = np.stack(col)
if t.value_type in (pa.string(), pa.binary()):
# If it is a string/binary type, coerce it to the
# correct type.
# The conversion dtype is only the first element
# in the dtype tuple.
col = col.astype(dt[0])
else:
# This is an empty column, and needs to be created with the
# correct type.
col = np.zeros(0, dtype=dt)
elif isinstance(t, pa.ListType):
# If we have a variable length string/binary column,
# we need to convert each row to the proper type.
if t.value_type in (pa.string(), pa.binary()):
col = np.array([row.astype(dt) for row in col], dtype=np.object_)
table.add_column(Column(name=name, data=col))
if meta_hdr is not None:
# Set description, format, unit, meta from the column
# metadata that was serialized with the table.
header_cols = {x["name"]: x for x in meta_hdr["datatype"]}
for col in table.columns.values():
for attr in ("description", "format", "unit", "meta"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Convert all compound columns to astropy objects
# (e.g. time.jd1, time.jd2 into a single time column)
table = serialize._construct_mixins_from_columns(table)
return table
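# Illustrative sketch (not part of astropy; file, column names and filter values
# are hypothetical): reading a subset of a Parquet table through the unified
# interface, which dispatches to read_table_parquet above.
def _example_read_parquet():
    from astropy.table import Table
    return Table.read(
        "catalog.parquet",
        format="parquet",
        include_names=["ra", "dec", "mag"],
        filters=[("mag", "<", 20.0)],
    )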
def write_table_parquet(table, output, overwrite=False):
"""
Write a Table object to a Parquet file.
The parquet writer supports tables with regular columns, fixed-size array
columns, and variable-length array columns (provided all arrays have the
same type).
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or path-like
The filename to write the table to.
overwrite : bool, optional
Whether to overwrite any existing file without warning. Default `False`.
Notes
-----
Tables written with array columns (fixed-size or variable-length) cannot
be read with pandas.
Raises
------
ValueError
If one of the columns has a mixed-type variable-length array, or
if it is a zero-length table and any of the columns are variable-length
arrays.
"""
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
pa, parquet, writer_version = get_pyarrow()
if not isinstance(output, (str, os.PathLike)):
raise TypeError(f"`output` should be a string or path-like, not {output}")
# Convert all compound columns into serialized column names, where
# e.g. 'time' becomes ['time.jd1', 'time.jd2'].
with serialize_context_as("parquet"):
encode_table = serialize.represent_mixins_as_columns(table)
# We store the encoded serialization metadata as a yaml string.
meta_yaml = meta.get_yaml_from_table(encode_table)
meta_yaml_str = "\n".join(meta_yaml)
# Build the pyarrow schema by converting from the numpy dtype of each
# column to an equivalent pyarrow type with from_numpy_dtype()
type_list = []
for name in encode_table.dtype.names:
dt = encode_table.dtype[name]
if dt.type == np.object_:
# If the column type is np.object_, then it should be a column
# of variable-length arrays. This can be serialized with parquet
# provided all of the elements have the same data-type.
# Additionally, if the table has no elements, we cannot deduce
# the datatype, and hence cannot serialize the table.
if len(encode_table) > 0:
obj_dtype = encode_table[name][0].dtype
# Check that the variable-length array all has the same type.
for row in encode_table[name]:
if row.dtype != obj_dtype:
raise ValueError(
f"Cannot serialize mixed-type column ({name}) with parquet."
)
# Calling pa.list_() creates a ListType which is an array of variable-
# length elements.
arrow_type = pa.list_(
value_type=pa.from_numpy_dtype(obj_dtype.type),
)
else:
raise ValueError(
"Cannot serialize zero-length table "
f"with object column ({name}) with parquet."
)
elif len(dt.shape) > 0:
# This column has a shape, and is an array type column. Calling
# pa.list_() with a list_size creates a FixedSizeListType, which
# is an array of fixed-length elements.
arrow_type = pa.list_(
value_type=pa.from_numpy_dtype(dt.subdtype[0].type),
list_size=np.prod(dt.shape),
)
else:
# This is a standard column.
arrow_type = pa.from_numpy_dtype(dt.type)
type_list.append((name, arrow_type))
metadata = {}
for name, col in encode_table.columns.items():
# Parquet will retain the datatypes of columns, but string and
# byte column length is lost. Therefore, we special-case these
# types to record the length for precise round-tripping.
t = col.dtype.type
itemsize = col.dtype.itemsize
if t is np.object_:
t = encode_table[name][0].dtype.type
if t == np.str_ or t == np.bytes_:
# We need to scan through all of them.
itemsize = -1
for row in encode_table[name]:
itemsize = max(itemsize, row.dtype.itemsize)
if t is np.str_:
metadata[f"table::len::{name}"] = str(itemsize // 4)
elif t is np.bytes_:
metadata[f"table::len::{name}"] = str(itemsize)
metadata["table_meta_yaml"] = meta_yaml_str
# Pyarrow stores all metadata as byte strings, so we explicitly encode
# our unicode strings in metadata as UTF-8 byte strings here.
metadata_encode = {
k.encode("UTF-8"): v.encode("UTF-8") for k, v in metadata.items()
}
schema = pa.schema(type_list, metadata=metadata_encode)
if os.path.exists(output):
if overwrite:
# We must remove the file prior to writing below.
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# We use writer_version ('2.4' with pyarrow >= 6.0, otherwise '2.0') for full
# support of datatypes including uint32.
with parquet.ParquetWriter(output, schema, version=writer_version) as writer:
# Convert each Table column to a pyarrow array
arrays = []
for name in encode_table.dtype.names:
dt = encode_table.dtype[name]
if dt.type == np.object_:
# Turn the column into a list of numpy arrays.
val = [row for row in encode_table[name]]
elif len(dt.shape) > 0:
if len(encode_table) > 0:
val = np.split(encode_table[name].ravel(), len(encode_table))
else:
val = []
else:
val = encode_table[name]
arrays.append(pa.array(val, type=schema.field(name).type))
# Create a pyarrow table from the list of arrays and the schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
# Write the pyarrow table to a file
writer.write_table(pa_table)
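# Illustrative sketch (not part of astropy; the file name is hypothetical):
# writing a table containing a fixed-size array column to Parquet. As noted in
# the docstring above, such array columns cannot be read back with pandas.
def _example_write_parquet():
    import numpy as np
    from astropy.table import Table
    t = Table()
    t["id"] = np.arange(3)
    t["vec"] = np.arange(6.0).reshape(3, 2)  # each row is a length-2 array
    t.write("catalog.parquet", format="parquet", overwrite=True)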
def _get_names(_dict):
"""Recursively find the names in a serialized column dictionary.
Parameters
----------
_dict : `dict`
Dictionary from astropy __serialized_columns__
Returns
-------
all_names : `list` [`str`]
All the column names mentioned in _dict and sub-dicts.
"""
all_names = []
for k, v in _dict.items():
if isinstance(v, dict):
all_names.extend(_get_names(v))
elif k == "name":
all_names.append(v)
return all_names
def register_parquet():
"""
Register Parquet with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader("parquet", Table, read_table_parquet)
io_registry.register_writer("parquet", Table, write_table_parquet)
io_registry.register_identifier("parquet", Table, parquet_identify)
def get_pyarrow():
try:
import pyarrow as pa
from pyarrow import parquet
except ImportError:
raise Exception("pyarrow is required to read and write parquet files")
if minversion(pa, "6.0.0"):
writer_version = "2.4"
else:
writer_version = "2.0"
return pa, parquet, writer_version
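# Illustrative round-trip sketch (not part of the module): it assumes pyarrow
# is installed and uses the write_table_parquet/read_table_parquet helpers
# defined above; the column names and the file name are arbitrary.
if __name__ == "__main__":
    import os
    import tempfile

    import numpy as np
    from astropy.table import Table

    demo = Table({"a": np.arange(3, dtype=np.int64), "b": ["x", "y", "z"]})
    path = os.path.join(tempfile.mkdtemp(), "demo.parquet")
    write_table_parquet(demo, path)
    roundtrip = read_table_parquet(path)
    assert np.all(roundtrip["a"] == demo["a"])
    assert list(roundtrip["b"]) == list(demo["b"])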
|
6d59800503e8b23822f820d7448f7f97f8cd3e127b1c8c253388bab6dd45308d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Mixin columns for use in ascii/tests/test_ecsv.py, fits/tests/test_connect.py,
and misc/tests/test_hdf5.py.
"""
import numpy as np
from astropy import coordinates, table, time
from astropy import units as u
el = coordinates.EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sr = coordinates.SphericalRepresentation([0, 1] * u.deg, [2, 3] * u.deg, 1 * u.kpc)
cr = coordinates.CartesianRepresentation([0, 1] * u.pc, [4, 5] * u.pc, [8, 6] * u.pc)
sd = coordinates.SphericalCosLatDifferential(
[0, 1] * u.mas / u.yr, [0, 1] * u.mas / u.yr, 10 * u.km / u.s
)
srd = coordinates.SphericalRepresentation(sr, differentials=sd)
sc = coordinates.SkyCoord(
[1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5"
)
scd = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit="deg,deg,m", frame="fk4", obstime=["J1990.5"] * 2
)
scdc = scd.copy()
scdc.representation_type = "cartesian"
scpm = coordinates.SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,pc",
pm_ra_cosdec=[7, 8] * u.mas / u.yr,
pm_dec=[9, 10] * u.mas / u.yr,
)
scpmrv = coordinates.SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,pc",
pm_ra_cosdec=[7, 8] * u.mas / u.yr,
pm_dec=[9, 10] * u.mas / u.yr,
radial_velocity=[11, 12] * u.km / u.s,
)
scrv = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit="deg,deg,pc", radial_velocity=[11, 12] * u.km / u.s
)
tm = time.Time(
[51000.5, 51001.5], format="mjd", scale="tai", precision=5, location=el[0]
)
tm2 = time.Time(tm, precision=3, format="iso")
tm3 = time.Time(tm, location=el)
tm3.info.serialize_method["ecsv"] = "jd1_jd2"
obj = table.Column([{"a": 1}, {"b": [2]}], dtype="object")
su = table.Column(
[(1, (1.5, 1.6)), (2, (2.5, 2.6))],
name="su",
dtype=[("i", np.int64), ("f", [("p1", np.float64), ("p0", np.float64)])],
)
su2 = table.Column(
[(["snake", "c"], [1.6, 1.5]), (["eal", "a"], [2.5, 2.6])],
dtype=[("name", "U5", (2,)), ("f", "f8", (2,))],
)
# NOTE: for testing, the name of the column "x" for the
# Quantity is important since it tests the fix for #10215
# (namespace clash, where "x" clashes with "el.x").
mixin_cols = {
"tm": tm,
"tm2": tm2,
"tm3": tm3,
"dt": time.TimeDelta([1, 2] * u.day),
"sc": sc,
"scd": scd,
"scdc": scdc,
"scpm": scpm,
"scpmrv": scpmrv,
"scrv": scrv,
"x": [1, 2] * u.m,
"qdb": [10, 20] * u.dB(u.mW),
"qdex": [4.5, 5.5] * u.dex(u.cm / u.s**2),
"qmag": [21, 22] * u.ABmag,
"lat": coordinates.Latitude([1, 2] * u.deg),
"lon": coordinates.Longitude([1, 2] * u.deg, wrap_angle=180.0 * u.deg),
"ang": coordinates.Angle([1, 2] * u.deg),
"el": el,
"sr": sr,
"cr": cr,
"sd": sd,
"srd": srd,
"nd": table.NdarrayMixin([1, 2]),
"obj": obj,
"su": su,
"su2": su2,
}
time_attrs = [
"value",
"shape",
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
]
compare_attrs = {
"tm": time_attrs,
"tm2": time_attrs,
"tm3": time_attrs,
"dt": ["shape", "value", "format", "scale"],
"sc": ["ra", "dec", "representation_type", "frame.name"],
"scd": ["ra", "dec", "distance", "representation_type", "frame.name"],
"scdc": ["x", "y", "z", "representation_type", "frame.name"],
"scpm": [
"ra",
"dec",
"distance",
"pm_ra_cosdec",
"pm_dec",
"representation_type",
"frame.name",
],
"scpmrv": [
"ra",
"dec",
"distance",
"pm_ra_cosdec",
"pm_dec",
"radial_velocity",
"representation_type",
"frame.name",
],
"scrv": [
"ra",
"dec",
"distance",
"radial_velocity",
"representation_type",
"frame.name",
],
"x": ["value", "unit"],
"qdb": ["value", "unit"],
"qdex": ["value", "unit"],
"qmag": ["value", "unit"],
"lon": ["value", "unit", "wrap_angle"],
"lat": ["value", "unit"],
"ang": ["value", "unit"],
"el": ["x", "y", "z", "ellipsoid"],
"nd": ["data"],
"sr": ["lon", "lat", "distance"],
"cr": ["x", "y", "z"],
"sd": ["d_lon_coslat", "d_lat", "d_distance"],
"srd": [
"lon",
"lat",
"distance",
"differentials.s.d_lon_coslat",
"differentials.s.d_lat",
"differentials.s.d_distance",
],
"obj": [],
"su": ["i", "f.p0", "f.p1"],
"su2": ["name", "f"],
}
non_trivial_names = {
"cr": ["cr.x", "cr.y", "cr.z"],
"dt": ["dt.jd1", "dt.jd2"],
"el": ["el.x", "el.y", "el.z"],
"sc": ["sc.ra", "sc.dec"],
"scd": ["scd.ra", "scd.dec", "scd.distance", "scd.obstime.jd1", "scd.obstime.jd2"],
"scdc": ["scdc.x", "scdc.y", "scdc.z", "scdc.obstime.jd1", "scdc.obstime.jd2"],
"scfc": ["scdc.x", "scdc.y", "scdc.z", "scdc.obstime.jd1", "scdc.obstime.jd2"],
"scpm": [
"scpm.ra",
"scpm.dec",
"scpm.distance",
"scpm.pm_ra_cosdec",
"scpm.pm_dec",
],
"scpmrv": [
"scpmrv.ra",
"scpmrv.dec",
"scpmrv.distance",
"scpmrv.pm_ra_cosdec",
"scpmrv.pm_dec",
"scpmrv.radial_velocity",
],
"scrv": ["scrv.ra", "scrv.dec", "scrv.distance", "scrv.radial_velocity"],
"sd": ["sd.d_lon_coslat", "sd.d_lat", "sd.d_distance"],
"sr": ["sr.lon", "sr.lat", "sr.distance"],
"srd": [
"srd.lon",
"srd.lat",
"srd.distance",
"srd.differentials.s.d_lon_coslat",
"srd.differentials.s.d_lat",
"srd.differentials.s.d_distance",
],
"su": ["su.i", "su.f.p1", "su.f.p0"],
"su2": ["su2.name", "su2.f"],
"tm": ["tm.jd1", "tm.jd2"],
"tm2": ["tm2.jd1", "tm2.jd2"],
"tm3": ["tm3.jd1", "tm3.jd2", "tm3.location.x", "tm3.location.y", "tm3.location.z"],
}
serialized_names = {
name: non_trivial_names.get(name, [name]) for name in sorted(mixin_cols)
}
|
5baf5183c768c5c56f859d38eabb2f94a083a36c8491a72399319f01fe1faa27 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
class CatchZeroByteWriter(io.BufferedWriter):
"""File handle to intercept 0-byte writes."""
def write(self, buffer):
nbytes = super().write(buffer)
if nbytes == 0:
raise ValueError("This writer does not allow empty writes")
return nbytes
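# Minimal usage sketch (assumption: this helper exists purely so tests can
# detect 0-byte writes). The file name below is arbitrary.
if __name__ == "__main__":
    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), "demo.bin")
    with CatchZeroByteWriter(io.FileIO(path, "w")) as fh:
        fh.write(b"some data")  # returns the number of bytes written
        try:
            fh.write(b"")  # zero-byte writes are rejected
        except ValueError:
            pass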
|
ac57120a86ebf13026e8b594d5cf9e204ce8ed6915c3fa763d0c6514c109c5a1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
from collections import OrderedDict
from .base import IORegistryError, _UnifiedIORegistryBase
__all__ = ["UnifiedIORegistry", "UnifiedInputRegistry", "UnifiedOutputRegistry"]
PATH_TYPES = (str, os.PathLike) # TODO! include bytes
def _expand_user_in_args(args):
# Conservatively attempt to apply `os.path.expanduser` to the first
# argument, which can be either a path or the contents of a table.
if len(args) and isinstance(args[0], PATH_TYPES):
ex_user = os.path.expanduser(args[0])
if ex_user != args[0] and os.path.exists(os.path.dirname(ex_user)):
args = (ex_user,) + args[1:]
return args
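# Worked illustration (comments only; the exact expansion depends on $HOME):
#
#     _expand_user_in_args(("~/data.csv", "ascii"))
#     -> ("/home/user/data.csv", "ascii")   # expanded: the parent dir exists
#     _expand_user_in_args(("a b c\n1 2 3",))
#     -> ("a b c\n1 2 3",)                  # table contents are left untouched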
# -----------------------------------------------------------------------------
class UnifiedInputRegistry(_UnifiedIORegistryBase):
"""Read-only Unified Registry.
.. versionadded:: 5.0
Examples
--------
First let's start by creating a read-only registry.
.. code-block:: python
>>> from astropy.io.registry import UnifiedInputRegistry
>>> read_reg = UnifiedInputRegistry()
There is nothing in this registry. Let's make a reader for the
:class:`~astropy.table.Table` class::
from astropy.table import Table
def my_table_reader(filename, some_option=1):
# Read in the table by any means necessary
return table # should be an instance of Table
Such a function can then be registered with the I/O registry::
read_reg.register_reader('my-table-format', Table, my_table_reader)
Note that we CANNOT then read in a table with::
d = Table.read('my_table_file.mtf', format='my-table-format')
Why? Because ``Table.read`` uses Astropy's default global registry, and this
is a separate registry.
Instead we can read by calling the ``read`` method on the registry::
d = read_reg.read(Table, 'my_table_file.mtf', format='my-table-format')
"""
def __init__(self):
super().__init__() # set _identifiers
self._readers = OrderedDict()
self._registries["read"] = dict(attr="_readers", column="Read")
self._registries_order = ("read", "identify")
# =========================================================================
# Read methods
def register_reader(
self, data_format, data_class, function, force=False, priority=0
):
"""
Register a reader function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when reading.
data_class : class
The class of the object that the reader produces.
function : function
The function to read in a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the reader, used to compare possible formats when
trying to determine the best reader to use. Higher priorities are
preferred over lower priorities, with the default priority being 0
(negative numbers are allowed though).
"""
if (data_format, data_class) not in self._readers or force:
self._readers[(data_format, data_class)] = function, priority
else:
raise IORegistryError(
f"Reader for format '{data_format}' and class '{data_class.__name__}'"
" is already defined"
)
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, "read")
def unregister_reader(self, data_format, data_class):
"""
Unregister a reader function.
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that the reader produces.
"""
if (data_format, data_class) in self._readers:
self._readers.pop((data_format, data_class))
else:
raise IORegistryError(
f"No reader defined for format '{data_format}' and class"
f" '{data_class.__name__}'"
)
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, "read")
def get_reader(self, data_format, data_class):
"""Get reader for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
reader : callable
The registered reader function for this format and class.
"""
readers = [(fmt, cls) for fmt, cls in self._readers if fmt == data_format]
for reader_format, reader_class in readers:
if self._is_best_match(data_class, reader_class, readers):
return self._readers[(reader_format, reader_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, "Read")
raise IORegistryError(
f"No reader defined for format '{data_format}' and class"
f" '{data_class.__name__}'.\n\nThe available formats"
f" are:\n\n{format_table_str}"
)
def read(self, cls, *args, format=None, cache=False, **kwargs):
"""
Read in data.
Parameters
----------
cls : class
The class of the object to be read in.
*args
The arguments passed to this method depend on the format.
format : str or None
The name of the format to use; if `None`, the format is automatically identified.
cache : bool
Whether to cache the results of reading in the data.
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered reader.
"""
ctx = None
try:
# Expand a tilde-prefixed path if present in args[0]
args = _expand_user_in_args(args)
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES) and not os.path.isdir(args[0]):
from astropy.utils.data import get_readable_fileobj
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
try:
ctx = get_readable_fileobj(
args[0], encoding="binary", cache=cache
)
fileobj = ctx.__enter__()
except OSError:
raise
except Exception:
fileobj = None
else:
args = [fileobj] + list(args[1:])
elif hasattr(args[0], "read"):
path = None
fileobj = args[0]
format = self._get_valid_format(
"read", cls, path, fileobj, args, kwargs
)
reader = self.get_reader(format, cls)
data = reader(*args, **kwargs)
if not isinstance(data, cls):
# User has read with a subclass where only the parent class is
# registered. This returns the parent class, so try coercing
# to desired subclass.
try:
data = cls(data)
except Exception:
raise TypeError(
f"could not convert reader output to {cls.__name__} class."
)
finally:
if ctx is not None:
ctx.__exit__(*sys.exc_info())
return data
# -----------------------------------------------------------------------------
class UnifiedOutputRegistry(_UnifiedIORegistryBase):
"""Write-only Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._writers = OrderedDict()
self._registries["write"] = dict(attr="_writers", column="Write")
self._registries_order = ("write", "identify")
# =========================================================================
# Write Methods
def register_writer(
self, data_format, data_class, function, force=False, priority=0
):
"""
Register a table writer function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when writing.
data_class : class
The class of the object that can be written.
function : function
The function to write out a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the writer, used to compare possible formats when trying
to determine the best writer to use. Higher priorities are preferred
over lower priorities, with the default priority being 0 (negative
numbers are allowed though).
"""
if not (data_format, data_class) in self._writers or force: # noqa: E713
self._writers[(data_format, data_class)] = function, priority
else:
raise IORegistryError(
f"Writer for format '{data_format}' and class '{data_class.__name__}'"
" is already defined"
)
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, "write")
def unregister_writer(self, data_format, data_class):
"""
Unregister a writer function.
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be written.
"""
if (data_format, data_class) in self._writers:
self._writers.pop((data_format, data_class))
else:
raise IORegistryError(
f"No writer defined for format '{data_format}' and class"
f" '{data_class.__name__}'"
)
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, "write")
def get_writer(self, data_format, data_class):
"""Get writer for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
writer : callable
The registered writer function for this format and class.
"""
writers = [(fmt, cls) for fmt, cls in self._writers if fmt == data_format]
for writer_format, writer_class in writers:
if self._is_best_match(data_class, writer_class, writers):
return self._writers[(writer_format, writer_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, "Write")
raise IORegistryError(
f"No writer defined for format '{data_format}' and class"
f" '{data_class.__name__}'.\n\nThe available formats"
f" are:\n\n{format_table_str}"
)
def write(self, data, *args, format=None, **kwargs):
"""
Write out data.
Parameters
----------
data : object
The data to write.
*args
The arguments passed to this method depend on the format.
format : str or None
The name of the format to use; if `None`, the format is automatically identified.
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered writer. Most often `None`.
.. versionadded:: 4.3
"""
# Expand a tilde-prefixed path if present in args[0]
args = _expand_user_in_args(args)
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
fileobj = None
elif hasattr(args[0], "read"):
path = None
fileobj = args[0]
format = self._get_valid_format(
"write", data.__class__, path, fileobj, args, kwargs
)
writer = self.get_writer(format, data.__class__)
return writer(data, *args, **kwargs)
# -----------------------------------------------------------------------------
class UnifiedIORegistry(UnifiedInputRegistry, UnifiedOutputRegistry):
"""Unified I/O Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._registries_order = ("read", "write", "identify")
def get_formats(self, data_class=None, readwrite=None):
"""
Get the list of registered I/O formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class, optional
Filter readers/writer to match data class (default = all classes).
readwrite : str or None, optional
Search only for readers (``"Read"``) or writers (``"Write"``).
If None, search for both. Default is None.
.. versionadded:: 1.3
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
"""
return super().get_formats(data_class, readwrite)
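# End-to-end sketch (assumptions: the "toy" format name, the DemoTable class
# and the file layout are invented for illustration; only registry methods
# defined in this module are used).
if __name__ == "__main__":
    import tempfile

    class DemoTable:
        def __init__(self, rows):
            self.rows = rows

    def demo_reader(path):
        with open(path) as fh:
            return DemoTable([line.split() for line in fh])

    def demo_writer(data, path):
        with open(path, "w") as fh:
            fh.writelines(" ".join(row) + "\n" for row in data.rows)

    def demo_identifier(origin, path, fileobj, *args, **kwargs):
        return path is not None and str(path).endswith(".toy")

    reg = UnifiedIORegistry()
    reg.register_reader("toy", DemoTable, demo_reader)
    reg.register_writer("toy", DemoTable, demo_writer)
    reg.register_identifier("toy", DemoTable, demo_identifier)

    path = os.path.join(tempfile.mkdtemp(), "demo.toy")
    reg.write(DemoTable([["1", "2"], ["3", "4"]]), path)  # format identified via ".toy"
    print(reg.read(DemoTable, path, format="toy").rows)   # [['1', '2'], ['3', '4']]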
|
03bc10343aabf53bdd70da65ccbccab659589f21c44b5df7de1b16b300fa673b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import contextlib
import re
import warnings
from collections import OrderedDict
from operator import itemgetter
import numpy as np
__all__ = ["IORegistryError"]
class IORegistryError(Exception):
"""Custom error for registry clashes."""
pass
# -----------------------------------------------------------------------------
class _UnifiedIORegistryBase(metaclass=abc.ABCMeta):
"""Base class for registries in Astropy's Unified IO.
This base class provides identification functions and miscellaneous
utilities. For an example of how to build a registry subclass, we suggest
:class:`~astropy.io.registry.UnifiedInputRegistry`, which enables
read-only registries. These higher-level subclasses will probably serve
better as a base class; for instance,
:class:`~astropy.io.registry.UnifiedIORegistry` subclasses both
:class:`~astropy.io.registry.UnifiedInputRegistry` and
:class:`~astropy.io.registry.UnifiedOutputRegistry` to enable both
reading from and writing to files.
.. versionadded:: 5.0
"""
def __init__(self):
# registry of identifier functions
self._identifiers = OrderedDict()
# what this class can do: e.g. 'read' &/or 'write'
self._registries = dict()
self._registries["identify"] = dict(attr="_identifiers", column="Auto-identify")
self._registries_order = ("identify",) # match keys in `_registries`
# If multiple formats are added to one class the update of the docs is quite
# expensive. Classes for which the doc update is temporarily delayed are added
# to this set.
self._delayed_docs_classes = set()
@property
def available_registries(self):
"""Available registries.
Returns
-------
``dict_keys``
"""
return self._registries.keys()
def get_formats(self, data_class=None, filter_on=None):
"""
Get the list of registered formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class or None, optional
Filter readers/writer to match data class (default = all classes).
filter_on : str or None, optional
Which registry to show, e.g. ``"identify"``.
If None, show all registries. Default is None.
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
Raises
------
ValueError
If ``filter_on`` is not None nor a registry name.
"""
from astropy.table import Table
# set up the column names
colnames = (
"Data class",
"Format",
*[self._registries[k]["column"] for k in self._registries_order],
"Deprecated",
)
i_dataclass = colnames.index("Data class")
i_format = colnames.index("Format")
i_regstart = colnames.index(
self._registries[self._registries_order[0]]["column"]
)
i_deprecated = colnames.index("Deprecated")
# registries
regs = set()
for k in self._registries.keys() - {"identify"}:
regs |= set(getattr(self, self._registries[k]["attr"]))
format_classes = sorted(regs, key=itemgetter(0))
# the format classes from all registries except "identify"
rows = []
for fmt, cls in format_classes:
# see if can skip, else need to document in row
if data_class is not None and not self._is_best_match(
data_class, cls, format_classes
):
continue
# flags for each registry
has_ = {
k: "Yes" if (fmt, cls) in getattr(self, v["attr"]) else "No"
for k, v in self._registries.items()
}
# Check if this is a short name (e.g. 'rdb') which is deprecated in
# favor of the full 'ascii.rdb'.
ascii_format_class = ("ascii." + fmt, cls)
# deprecation flag
deprecated = "Yes" if ascii_format_class in format_classes else ""
# add to rows
rows.append(
(
cls.__name__,
fmt,
*[has_[n] for n in self._registries_order],
deprecated,
)
)
# filter_on can be in self._registries_order or None
if str(filter_on).lower() in self._registries_order:
index = self._registries_order.index(str(filter_on).lower())
rows = [row for row in rows if row[i_regstart + index] == "Yes"]
elif filter_on is not None:
raise ValueError(
f'unrecognized value for "filter_on": {filter_on!r}.\n'
f"Allowed are {self._registries_order} and None."
)
# Sorting the list of tuples is much faster than sorting it after the
# table is created. (#5262)
if rows:
# Indices represent "Data Class", "Deprecated" and "Format".
data = list(
zip(*sorted(rows, key=itemgetter(i_dataclass, i_deprecated, i_format)))
)
else:
data = None
# make table
# need to filter elementwise comparison failure issue
# https://github.com/numpy/numpy/issues/6784
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=FutureWarning)
format_table = Table(data, names=colnames)
if not np.any(format_table["Deprecated"].data == "Yes"):
format_table.remove_column("Deprecated")
return format_table
@contextlib.contextmanager
def delay_doc_updates(self, cls):
"""Contextmanager to disable documentation updates when registering
reader and writer. The documentation is only built once when the
contextmanager exits.
.. versionadded:: 1.3
Parameters
----------
cls : class
Class for which the documentation updates should be delayed.
Notes
-----
Registering multiple readers and writers can cause significant overhead
because the documentation of the corresponding ``read`` and ``write``
methods is rebuilt every time.
Examples
--------
See, for example, the source code of ``astropy.table.__init__``.
"""
self._delayed_docs_classes.add(cls)
yield
self._delayed_docs_classes.discard(cls)
for method in self._registries.keys() - {"identify"}:
self._update__doc__(cls, method)
# =========================================================================
# Identifier methods
def register_identifier(self, data_format, data_class, identifier, force=False):
"""
Associate an identifier function with a specific data type.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
identifier : function
A function that checks the argument specified to `read` or `write` to
determine whether the input can be interpreted as a table of type
``data_format``. This function should take the following arguments:
- ``origin``: A string ``"read"`` or ``"write"`` identifying whether
the file is to be opened for reading or writing.
- ``path``: The path to the file.
- ``fileobj``: An open file object to read the file's contents, or
`None` if the file could not be opened.
- ``*args``: Positional arguments for the `read` or `write`
function.
- ``**kwargs``: Keyword arguments for the `read` or `write`
function.
One or both of ``path`` or ``fileobj`` may be `None`. If they are
both `None`, the identifier will need to work from ``args[0]``.
The function should return True if the input can be identified
as being of format ``data_format``, and False otherwise.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
Examples
--------
To set the identifier based on extensions, for formats that take a
filename as a first argument, you can do for example
.. code-block:: python
from astropy.io.registry import register_identifier
from astropy.table import Table
def my_identifier(*args, **kwargs):
return isinstance(args[0], str) and args[0].endswith('.tbl')
register_identifier('ipac', Table, my_identifier)
unregister_identifier('ipac', Table)
"""
if not (data_format, data_class) in self._identifiers or force: # noqa: E713
self._identifiers[(data_format, data_class)] = identifier
else:
raise IORegistryError(
f"Identifier for format {data_format!r} and class"
f" {data_class.__name__!r} is already defined"
)
def unregister_identifier(self, data_format, data_class):
"""
Unregister an identifier function.
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be read/written.
"""
if (data_format, data_class) in self._identifiers:
self._identifiers.pop((data_format, data_class))
else:
raise IORegistryError(
f"No identifier defined for format {data_format!r} and class"
f" {data_class.__name__!r}"
)
def identify_format(self, origin, data_class_required, path, fileobj, args, kwargs):
"""Loop through identifiers to see which formats match.
Parameters
----------
origin : str
A string ``"read`` or ``"write"`` identifying whether the file is to be
opened for reading or writing.
data_class_required : object
The specified class for the result of `read` or the class that is to be
written.
path : str or path-like or None
The path to the file or None.
fileobj : file-like or None.
An open file object to read the file's contents, or ``None`` if the
file could not be opened.
args : sequence
Positional arguments for the `read` or `write` function. Note that
these must be provided as sequence.
kwargs : dict-like
Keyword arguments for the `read` or `write` function. Note that this
parameter must be `dict`-like.
Returns
-------
valid_formats : list
List of matching formats.
"""
valid_formats = []
for data_format, data_class in self._identifiers:
if self._is_best_match(data_class_required, data_class, self._identifiers):
if self._identifiers[(data_format, data_class)](
origin, path, fileobj, *args, **kwargs
):
valid_formats.append(data_format)
return valid_formats
# =========================================================================
# Utils
def _get_format_table_str(self, data_class, filter_on):
"""``get_formats()``, without column "Data class", as a str."""
format_table = self.get_formats(data_class, filter_on)
format_table.remove_column("Data class")
format_table_str = "\n".join(format_table.pformat(max_lines=-1))
return format_table_str
def _is_best_match(self, class1, class2, format_classes):
"""Determine if class2 is the "best" match for class1 in the list of classes.
It is assumed that (class2 in classes) is True.
class2 is the the best match if:
- ``class1`` is a subclass of ``class2`` AND
- ``class2`` is the nearest ancestor of ``class1`` that is in classes
(which includes the case that ``class1 is class2``)
"""
if issubclass(class1, class2):
classes = {cls for fmt, cls in format_classes}
for parent in class1.__mro__:
if parent is class2: # class2 is closest registered ancestor
return True
if parent in classes: # class2 was superseded
return False
return False
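# Worked example (comments only), using the familiar Table/QTable pair
# (QTable subclasses Table): if readers are registered for both Table and
# QTable, then for class1=QTable the nearest registered ancestor is QTable
# itself, so _is_best_match returns True for class2=QTable and False for
# class2=Table. If only Table were registered, class2=Table would win.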
def _get_valid_format(self, mode, cls, path, fileobj, args, kwargs):
"""
Returns the first valid format that can be used to read/write the data in
question. Mode can be either 'read' or 'write'.
"""
valid_formats = self.identify_format(mode, cls, path, fileobj, args, kwargs)
if len(valid_formats) == 0:
format_table_str = self._get_format_table_str(cls, mode.capitalize())
raise IORegistryError(
"Format could not be identified based on the"
" file name or contents, please provide a"
" 'format' argument.\n"
f"The available formats are:\n{format_table_str}"
)
elif len(valid_formats) > 1:
return self._get_highest_priority_format(mode, cls, valid_formats)
return valid_formats[0]
def _get_highest_priority_format(self, mode, cls, valid_formats):
"""
Return the reader or writer with the highest priority, raising an error
in case of a tie.
"""
if mode == "read":
format_dict = self._readers
mode_loader = "reader"
elif mode == "write":
format_dict = self._writers
mode_loader = "writer"
best_formats = []
current_priority = -np.inf
for format in valid_formats:
try:
_, priority = format_dict[(format, cls)]
except KeyError:
# We could raise an exception here, but get_reader/get_writer handle
# this case better; instead, maximally deprioritise the format.
priority = -np.inf
if priority == current_priority:
best_formats.append(format)
elif priority > current_priority:
best_formats = [format]
current_priority = priority
if len(best_formats) > 1:
raise IORegistryError(
"Format is ambiguous - options are:"
f" {', '.join(sorted(valid_formats, key=itemgetter(0)))}"
)
return best_formats[0]
def _update__doc__(self, data_class, readwrite):
"""
Update the docstring to include all the available readers / writers for
the ``data_class.read``/``data_class.write`` functions (respectively).
Don't update if the data_class does not have the relevant method.
"""
# abort if method "readwrite" isn't on data_class
if not hasattr(data_class, readwrite):
return
from .interface import UnifiedReadWrite
FORMATS_TEXT = "The available built-in formats are:"
# Get the existing read or write method and its docstring
class_readwrite_func = getattr(data_class, readwrite)
if not isinstance(class_readwrite_func.__doc__, str):
# No docstring--could just be test code, or possibly code compiled
# without docstrings
return
lines = class_readwrite_func.__doc__.splitlines()
# Find the location of the existing formats table if it exists
sep_indices = [ii for ii, line in enumerate(lines) if FORMATS_TEXT in line]
if sep_indices:
# Chop off the existing formats table, including the initial blank line
chop_index = sep_indices[0]
lines = lines[:chop_index]
# Find the minimum indent, skipping the first line because it might be odd
matches = [re.search(r"(\S)", line) for line in lines[1:]]
left_indent = " " * min(match.start() for match in matches if match)
# Get the available unified I/O formats for this class
# Include only formats that have a reader, and drop the 'Data class' column
format_table = self.get_formats(data_class, readwrite.capitalize())
format_table.remove_column("Data class")
# Get the available formats as a table, then munge the output of pformat()
# a bit and put it into the docstring.
new_lines = format_table.pformat(max_lines=-1, max_width=80)
table_rst_sep = re.sub("-", "=", new_lines[1])
new_lines[1] = table_rst_sep
new_lines.insert(0, table_rst_sep)
new_lines.append(table_rst_sep)
# Check for deprecated names and include a warning at the end.
if "Deprecated" in format_table.colnames:
new_lines.extend(
[
"",
"Deprecated format names like ``aastex`` will be "
"removed in a future version. Use the full ",
"name (e.g. ``ascii.aastex``) instead.",
]
)
new_lines = [FORMATS_TEXT, ""] + new_lines
lines.extend([left_indent + line for line in new_lines])
# Depending on Python version and whether class_readwrite_func is
# an instancemethod or classmethod, one of the following will work.
if isinstance(class_readwrite_func, UnifiedReadWrite):
class_readwrite_func.__class__.__doc__ = "\n".join(lines)
else:
try:
class_readwrite_func.__doc__ = "\n".join(lines)
except AttributeError:
class_readwrite_func.__func__.__doc__ = "\n".join(lines)
|
e3888f94e0e85e5e39d15109525711755c47e1080d3d2fb697924fd9c8d1136d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import os
import re
from .base import IORegistryError
__all__ = ["UnifiedReadWriteMethod", "UnifiedReadWrite"]
# -----------------------------------------------------------------------------
class UnifiedReadWrite:
"""Base class for the worker object used in unified read() or write() methods.
This lightweight object is created for each `read()` or `write()` call
via ``read`` / ``write`` descriptors on the data object class. The key
driver is to allow complete format-specific documentation of available
method options via a ``help()`` method, e.g. ``Table.read.help('fits')``.
Subclasses must define a ``__call__`` method which is what actually gets
called when the data object ``read()`` or ``write()`` method is called.
For the canonical example see the `~astropy.table.Table` class
implementation (in particular the ``connect.py`` module there).
Parameters
----------
instance : object
Descriptor calling instance or None if no instance
cls : type
Descriptor calling class (either owner class or instance class)
method_name : str
Method name, e.g. 'read' or 'write'
registry : ``_UnifiedIORegistryBase`` or None, optional
The IO registry.
"""
def __init__(self, instance, cls, method_name, registry=None):
if registry is None:
from astropy.io.registry.compat import default_registry as registry
self._registry = registry
self._instance = instance
self._cls = cls
self._method_name = method_name # 'read' or 'write'
@property
def registry(self):
"""Unified I/O registry instance."""
return self._registry
def help(self, format=None, out=None):
"""Output help documentation for the specified unified I/O ``format``.
By default the help output is printed to the console via ``pydoc.pager``.
Instead one can supply a file handle object as ``out`` and the output
will be written to that handle.
Parameters
----------
format : str
Unified I/O format name, e.g. 'fits' or 'ascii.ecsv'
out : None or file-like
Output destination (default is stdout via a pager)
"""
cls = self._cls
method_name = self._method_name
# Get reader or writer function associated with the registry
get_func = (
self._registry.get_reader
if method_name == "read"
else self._registry.get_writer
)
try:
if format:
read_write_func = get_func(format, cls)
except IORegistryError as err:
reader_doc = "ERROR: " + str(err)
else:
if format:
# Format-specific
header = (
f"{cls.__name__}.{method_name}(format='{format}') documentation\n"
)
doc = read_write_func.__doc__
else:
# General docs
header = f"{cls.__name__}.{method_name} general documentation\n"
doc = getattr(cls, method_name).__doc__
reader_doc = re.sub(".", "=", header)
reader_doc += header
reader_doc += re.sub(".", "=", header)
reader_doc += os.linesep
if doc is not None:
reader_doc += inspect.cleandoc(doc)
if out is None:
import pydoc
pydoc.pager(reader_doc)
else:
out.write(reader_doc)
def list_formats(self, out=None):
"""Print a list of available formats to console (or ``out`` filehandle).
Parameters
----------
out : None or file handle object
Output destination (default is stdout via a pager)
"""
tbl = self._registry.get_formats(self._cls, self._method_name.capitalize())
del tbl["Data class"]
if out is None:
tbl.pprint(max_lines=-1, max_width=-1)
else:
out.write("\n".join(tbl.pformat(max_lines=-1, max_width=-1)))
return out
# -----------------------------------------------------------------------------
class UnifiedReadWriteMethod(property):
"""Descriptor class for creating read() and write() methods in unified I/O.
The canonical example is in the ``Table`` class, where the ``connect.py``
module creates subclasses of the ``UnifiedReadWrite`` class. These have
custom ``__call__`` methods that do the setup work related to calling the
registry read() or write() functions. With this, the ``Table`` class
defines read and write methods as follows::
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
Parameters
----------
func : `~astropy.io.registry.UnifiedReadWrite` subclass
Class that defines read or write functionality
"""
# We subclass property to ensure that __set__ is defined and that,
# therefore, we are a data descriptor, which cannot be overridden.
# This also means we automatically inherit the __doc__ of fget (which will
# be a UnifiedReadWrite subclass), and that this docstring gets recognized
# and properly typeset by sphinx (which was previously an issue; see
# gh-11554).
# We override __get__ to pass both instance and class to UnifiedReadWrite.
def __get__(self, instance, owner_cls):
return self.fget(instance, owner_cls)
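# Wiring sketch (assumptions: DemoData, DemoDataRead and the "demo" format are
# invented for illustration; the registry import and the descriptor behave as
# documented above).
if __name__ == "__main__":
    import tempfile

    from astropy.io.registry import UnifiedIORegistry

    _demo_registry = UnifiedIORegistry()

    class DemoData:
        def __init__(self, words):
            self.words = words

    class DemoDataRead(UnifiedReadWrite):
        def __init__(self, instance, cls):
            super().__init__(instance, cls, "read", registry=_demo_registry)

        def __call__(self, *args, **kwargs):
            return self.registry.read(self._cls, *args, **kwargs)

    # Attach the descriptor, in the same way ``Table`` does in its connect.py.
    DemoData.read = UnifiedReadWriteMethod(DemoDataRead)

    def demo_reader(path):
        with open(path) as fh:
            return DemoData(fh.read().split())

    _demo_registry.register_reader("demo", DemoData, demo_reader)

    with tempfile.NamedTemporaryFile("w", suffix=".demo", delete=False) as fh:
        fh.write("alpha beta")

    print(DemoData.read(fh.name, format="demo").words)  # ['alpha', 'beta']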
|
68f6718aeb8498de9e4232475ef7089e32ae699010cf7e6b913cc2ae3133d20c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module handles the conversion of various VOTABLE datatypes
to/from TABLEDATA_ and BINARY_ formats.
"""
# STDLIB
import re
import sys
from struct import pack as _struct_pack
from struct import unpack as _struct_unpack
# THIRD-PARTY
import numpy as np
from numpy import ma
# ASTROPY
from astropy.utils.xml.writer import xml_escape_cdata
# LOCAL
from .exceptions import (
E01,
E02,
E03,
E04,
E05,
E06,
E24,
W01,
W30,
W31,
W39,
W46,
W47,
W49,
W51,
W55,
vo_raise,
vo_warn,
warn_or_raise,
)
__all__ = ["get_converter", "Converter", "table_column_to_votable_datatype"]
pedantic_array_splitter = re.compile(r" +")
array_splitter = re.compile(r"\s+|(?:\s*,\s*)")
"""
A regex to handle splitting values on either whitespace or commas.
SPEC: Usage of commas is not actually allowed by the spec, but many
files in the wild use them.
"""
_zero_int = b"\0\0\0\0"
_empty_bytes = b""
_zero_byte = b"\0"
struct_unpack = _struct_unpack
struct_pack = _struct_pack
if sys.byteorder == "little":
def _ensure_bigendian(x):
if x.dtype.byteorder != ">":
return x.byteswap()
return x
else:
def _ensure_bigendian(x):
if x.dtype.byteorder == "<":
return x.byteswap()
return x
def _make_masked_array(data, mask):
"""
Masked arrays of zero length that also have a mask of zero length
cause problems in Numpy (at least in 1.6.2). This function
creates a masked array from data and a mask, unless it is zero
length.
"""
# np.ma doesn't like setting mask to []
if len(data):
return ma.array(np.array(data), mask=np.array(mask, dtype="bool"))
else:
return ma.array(np.array(data))
def bitarray_to_bool(data, length):
"""
Converts a bit array (a string of bits in a bytes object) to a
boolean Numpy array.
Parameters
----------
data : bytes
The bit array. The most significant byte is read first.
length : int
The number of bits to read. The least significant bits in the
data bytes beyond length will be ignored.
Returns
-------
array : numpy bool array
"""
results = []
for byte in data:
for bit_no in range(7, -1, -1):
bit = byte & (1 << bit_no)
bit = bit != 0
results.append(bit)
if len(results) == length:
break
if len(results) == length:
break
return np.array(results, dtype="b1")
def bool_to_bitarray(value):
"""
Converts a numpy boolean array to a bit array (a string of bits in
a bytes object).
Parameters
----------
value : numpy bool array
Returns
-------
bit_array : bytes
The first value in the input array will be the most
significant bit in the result. The length will be `floor((N +
7) / 8)` where `N` is the length of `value`.
"""
value = value.flat
bit_no = 7
byte = 0
bytes = []
for v in value:
if v:
byte |= 1 << bit_no
if bit_no == 0:
bytes.append(byte)
bit_no = 7
byte = 0
else:
bit_no -= 1
if bit_no != 7:
bytes.append(byte)
return struct_pack(f"{len(bytes)}B", *bytes)
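# Round-trip illustration (comments only): three booleans pack into one byte,
# with the first value in the most significant bit (0b10100000 == 0xa0).
#
#     bool_to_bitarray(np.array([True, False, True]))  -> b'\xa0'
#     bitarray_to_bool(b'\xa0', 3)                      -> array([ True, False,  True])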
class Converter:
"""
The base class for all converters. Each subclass handles
converting a specific VOTABLE data type to/from the TABLEDATA_ and
BINARY_ on-disk representations.
Parameters
----------
field : `~astropy.io.votable.tree.Field`
object describing the datatype
config : dict
The parser configuration dictionary
pos : tuple
The position in the XML file where the FIELD object was
found. Used for error messages.
"""
def __init__(self, field, config=None, pos=None):
pass
@staticmethod
def _parse_length(read):
return struct_unpack(">I", read(4))[0]
@staticmethod
def _write_length(length):
return struct_pack(">I", int(length))
def supports_empty_values(self, config):
"""
Returns True when the field can be completely empty.
"""
return config.get("version_1_3_or_later")
def parse(self, value, config=None, pos=None):
"""
Convert the string *value* from the TABLEDATA_ format into an
object with the correct native in-memory datatype and mask flag.
Parameters
----------
value : str
value in TABLEDATA format
Returns
-------
native : tuple
A two-element tuple of: value, mask.
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
raise NotImplementedError("This datatype must implement a 'parse' method.")
def parse_scalar(self, value, config=None, pos=None):
"""
Parse a single scalar of the underlying type of the converter.
For non-array converters, this is equivalent to parse. For
array converters, this is used to parse a single
element of the array.
Parameters
----------
value : str
value in TABLEDATA format
Returns
-------
native : (2,) tuple
(value, mask)
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
return self.parse(value, config, pos)
def output(self, value, mask):
"""
Convert the object *value* (in the native in-memory datatype)
to a unicode string suitable for serializing in the TABLEDATA_
format.
Parameters
----------
value
The value, the native type corresponding to this converter
mask : bool
If `True`, will return the string representation of a
masked value.
Returns
-------
tabledata_repr : unicode
"""
raise NotImplementedError("This datatype must implement a 'output' method.")
def binparse(self, read):
"""
Reads some number of bytes from the BINARY_ format
representation by calling the function *read*, and returns the
native in-memory object representation for the datatype
handled by *self*.
Parameters
----------
read : function
A function that given a number of bytes, returns a byte
string.
Returns
-------
native : (2,) tuple
(value, mask). The value as a Numpy array or scalar, and *mask* is
True if the value is missing.
"""
raise NotImplementedError("This datatype must implement a 'binparse' method.")
def binoutput(self, value, mask):
"""
Convert the object *value* in the native in-memory datatype to
a string of bytes suitable for serialization in the BINARY_
format.
Parameters
----------
value
The value, the native type corresponding to this converter
mask : bool
If `True`, will return the string representation of a
masked value.
Returns
-------
bytes : bytes
The binary representation of the value, suitable for
serialization in the BINARY_ format.
"""
raise NotImplementedError("This datatype must implement a 'binoutput' method.")
class Char(Converter):
"""
Handles the char datatype. (7-bit unsigned characters).
Missing values are not handled for string or unicode types.
"""
default = _empty_bytes
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Converter.__init__(self, field, config, pos)
self.field_name = field.name
if field.arraysize is None:
vo_warn(W47, (), config, pos)
field.arraysize = "1"
if field.arraysize == "*":
self.format = "O"
self.binparse = self._binparse_var
self.binoutput = self._binoutput_var
self.arraysize = "*"
else:
if field.arraysize.endswith("*"):
field.arraysize = field.arraysize[:-1]
try:
self.arraysize = int(field.arraysize)
except ValueError:
vo_raise(E01, (field.arraysize, "char", field.ID), config)
self.format = f"U{self.arraysize:d}"
self.binparse = self._binparse_fixed
self.binoutput = self._binoutput_fixed
self._struct_format = f">{self.arraysize:d}s"
def supports_empty_values(self, config):
return True
def parse(self, value, config=None, pos=None):
if self.arraysize != "*" and len(value) > self.arraysize:
vo_warn(W46, ("char", self.arraysize), config, pos)
# Warn about non-ascii characters if warnings are enabled.
try:
value.encode("ascii")
except UnicodeEncodeError:
vo_warn(W55, (self.field_name, value), config, pos)
return value, False
def output(self, value, mask):
if mask:
return ""
# The output methods for Char assume that value is either str or bytes.
# This method needs to return a str, but needs to warn if the str contains
# non-ASCII characters.
try:
if isinstance(value, str):
value.encode("ascii")
else:
# Check for non-ASCII chars in the bytes object.
value = value.decode("ascii")
except (ValueError, UnicodeEncodeError):
warn_or_raise(E24, UnicodeEncodeError, (value, self.field_name))
finally:
if isinstance(value, bytes):
# Convert the bytes to str regardless of non-ASCII chars.
value = value.decode("utf-8")
return xml_escape_cdata(value)
def _binparse_var(self, read):
length = self._parse_length(read)
return read(length).decode("ascii"), False
def _binparse_fixed(self, read):
s = struct_unpack(self._struct_format, read(self.arraysize))[0]
end = s.find(_zero_byte)
s = s.decode("ascii")
if end != -1:
return s[:end], False
return s, False
def _binoutput_var(self, value, mask):
if mask or value is None or value == "":
return _zero_int
if isinstance(value, str):
try:
value = value.encode("ascii")
except ValueError:
vo_raise(E24, (value, self.field_name))
return self._write_length(len(value)) + value
def _binoutput_fixed(self, value, mask):
if mask:
value = _empty_bytes
elif isinstance(value, str):
try:
value = value.encode("ascii")
except ValueError:
vo_raise(E24, (value, self.field_name))
return struct_pack(self._struct_format, value)
class UnicodeChar(Converter):
"""
Handles the unicodeChar data type. UTF-16-BE.
Missing values are not handled for string or unicode types.
"""
default = ""
def __init__(self, field, config=None, pos=None):
Converter.__init__(self, field, config, pos)
if field.arraysize is None:
vo_warn(W47, (), config, pos)
field.arraysize = "1"
if field.arraysize == "*":
self.format = "O"
self.binparse = self._binparse_var
self.binoutput = self._binoutput_var
self.arraysize = "*"
else:
try:
self.arraysize = int(field.arraysize)
except ValueError:
vo_raise(E01, (field.arraysize, "unicode", field.ID), config)
self.format = f"U{self.arraysize:d}"
self.binparse = self._binparse_fixed
self.binoutput = self._binoutput_fixed
self._struct_format = f">{self.arraysize*2:d}s"
def parse(self, value, config=None, pos=None):
if self.arraysize != "*" and len(value) > self.arraysize:
vo_warn(W46, ("unicodeChar", self.arraysize), config, pos)
return value, False
def output(self, value, mask):
if mask:
return ""
return xml_escape_cdata(str(value))
def _binparse_var(self, read):
length = self._parse_length(read)
return read(length * 2).decode("utf_16_be"), False
def _binparse_fixed(self, read):
s = struct_unpack(self._struct_format, read(self.arraysize * 2))[0]
s = s.decode("utf_16_be")
end = s.find("\0")
if end != -1:
return s[:end], False
return s, False
def _binoutput_var(self, value, mask):
if mask or value is None or value == "":
return _zero_int
encoded = value.encode("utf_16_be")
return self._write_length(len(encoded) // 2) + encoded
def _binoutput_fixed(self, value, mask):
if mask:
value = ""
return struct_pack(self._struct_format, value.encode("utf_16_be"))
class Array(Converter):
"""
Handles both fixed and variable-lengths arrays.
"""
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Converter.__init__(self, field, config, pos)
if config.get("verify", "ignore") == "exception":
self._splitter = self._splitter_pedantic
else:
self._splitter = self._splitter_lax
def parse_scalar(self, value, config=None, pos=0):
return self._base.parse_scalar(value, config, pos)
@staticmethod
def _splitter_pedantic(value, config=None, pos=None):
return pedantic_array_splitter.split(value)
@staticmethod
def _splitter_lax(value, config=None, pos=None):
if "," in value:
vo_warn(W01, (), config, pos)
return array_splitter.split(value)
class VarArray(Array):
"""
Handles variable lengths arrays (i.e. where *arraysize* is '*').
"""
format = "O"
def __init__(self, field, base, arraysize, config=None, pos=None):
Array.__init__(self, field, config)
self._base = base
self.default = np.array([], dtype=self._base.format)
def output(self, value, mask):
output = self._base.output
result = [output(x, m) for x, m in np.broadcast(value, mask)]
return " ".join(result)
def binparse(self, read):
length = self._parse_length(read)
result = []
result_mask = []
binparse = self._base.binparse
for i in range(length):
val, mask = binparse(read)
result.append(val)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
def binoutput(self, value, mask):
if value is None or len(value) == 0:
return _zero_int
length = len(value)
result = [self._write_length(length)]
binoutput = self._base.binoutput
for x, m in zip(value, value.mask):
result.append(binoutput(x, m))
return _empty_bytes.join(result)
class ArrayVarArray(VarArray):
"""
Handles an array of variable-length arrays, i.e. where *arraysize*
ends in '*'.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == "":
return ma.array([]), False
parts = self._splitter(value, config, pos)
items = self._base._items
parse_parts = self._base.parse_parts
if len(parts) % items != 0:
vo_raise(E02, (items, len(parts)), config, pos)
result = []
result_mask = []
for i in range(0, len(parts), items):
value, mask = parse_parts(parts[i : i + items], config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class ScalarVarArray(VarArray):
"""
Handles a variable-length array of numeric scalars.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == "":
return ma.array([]), False
parts = self._splitter(value, config, pos)
parse = self._base.parse
result = []
result_mask = []
for x in parts:
value, mask = parse(x, config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class NumericArray(Array):
"""
Handles a fixed-length array of numeric scalars.
"""
vararray_type = ArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
Array.__init__(self, field, config, pos)
self._base = base
self._arraysize = arraysize
self.format = f"{tuple(arraysize)}{base.format}"
self._items = 1
for dim in arraysize:
self._items *= dim
self._memsize = np.dtype(self.format).itemsize
self._bigendian_format = ">" + self.format
self.default = np.empty(arraysize, dtype=self._base.format)
self.default[...] = self._base.default
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
elif config["version_1_3_or_later"] and value == "":
return np.zeros(self._arraysize, dtype=self._base.format), True
parts = self._splitter(value, config, pos)
if len(parts) != self._items:
warn_or_raise(E02, E02, (self._items, len(parts)), config, pos)
if config.get("verify", "ignore") == "exception":
return self.parse_parts(parts, config, pos)
else:
if len(parts) == self._items:
pass
elif len(parts) > self._items:
parts = parts[: self._items]
else:
parts = parts + ([self._base.default] * (self._items - len(parts)))
return self.parse_parts(parts, config, pos)
def parse_parts(self, parts, config=None, pos=None):
base_parse = self._base.parse
result = []
result_mask = []
for x in parts:
value, mask = base_parse(x, config, pos)
result.append(value)
result_mask.append(mask)
result = np.array(result, dtype=self._base.format).reshape(self._arraysize)
result_mask = np.array(result_mask, dtype="bool").reshape(self._arraysize)
return result, result_mask
def output(self, value, mask):
base_output = self._base.output
value = np.asarray(value)
mask = np.asarray(mask)
if mask.size <= 1:
func = np.broadcast
else:  # When mask is already an array but value is scalar, this prevents broadcasting
func = zip
return " ".join(base_output(x, m) for x, m in func(value.flat, mask.flat))
def binparse(self, read):
result = np.frombuffer(read(self._memsize), dtype=self._bigendian_format)[0]
result_mask = self._base.is_null(result)
return result, result_mask
def binoutput(self, value, mask):
filtered = self._base.filter_array(value, mask)
filtered = _ensure_bigendian(filtered)
return filtered.tobytes()
class Numeric(Converter):
"""
The base class for all numeric data types.
"""
array_type = NumericArray
vararray_type = ScalarVarArray
null = None
def __init__(self, field, config=None, pos=None):
Converter.__init__(self, field, config, pos)
self._memsize = np.dtype(self.format).itemsize
self._bigendian_format = ">" + self.format
if field.values.null is not None:
self.null = np.asarray(field.values.null, dtype=self.format)
self.default = self.null
self.is_null = self._is_null
else:
self.is_null = np.isnan
def binparse(self, read):
result = np.frombuffer(read(self._memsize), dtype=self._bigendian_format)
return result[0], self.is_null(result[0])
def _is_null(self, value):
return value == self.null
class FloatingPoint(Numeric):
"""
The base class for floating-point datatypes.
"""
default = np.nan
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Numeric.__init__(self, field, config, pos)
precision = field.precision
width = field.width
if precision is None:
format_parts = ["{!r:>"]
else:
format_parts = ["{:"]
if width is not None:
format_parts.append(str(width))
if precision is not None:
if precision.startswith("E"):
format_parts.append(f".{int(precision[1:]):d}g")
elif precision.startswith("F"):
format_parts.append(f".{int(precision[1:]):d}f")
else:
format_parts.append(f".{int(precision):d}f")
format_parts.append("}")
self._output_format = "".join(format_parts)
self.nan = np.array(np.nan, self.format)
if self.null is None:
self._null_output = "NaN"
self._null_binoutput = self.binoutput(self.nan, False)
self.filter_array = self._filter_nan
else:
self._null_output = self.output(np.asarray(self.null), False)
self._null_binoutput = self.binoutput(np.asarray(self.null), False)
self.filter_array = self._filter_null
if config.get("verify", "ignore") == "exception":
self.parse = self._parse_pedantic
else:
self.parse = self._parse_permissive
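# Illustration of the derived output format (comments only): a field with
# width=10 and precision="E3" yields "{:10.3g}", precision="F3" yields
# "{:10.3f}", and no precision falls back to repr via "{!r:>10}".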
def supports_empty_values(self, config):
return True
def _parse_pedantic(self, value, config=None, pos=None):
if value.strip() == "":
return self.null, True
f = float(value)
return f, self.is_null(f)
def _parse_permissive(self, value, config=None, pos=None):
try:
f = float(value)
return f, self.is_null(f)
except ValueError:
# IRSA VOTables use the word 'null' to specify empty values,
# but this is not defined in the VOTable spec.
if value.strip() != "":
vo_warn(W30, value, config, pos)
return self.null, True
@property
def output_format(self):
return self._output_format
def output(self, value, mask):
if mask:
return self._null_output
if np.isfinite(value):
if not np.isscalar(value):
value = value.dtype.type(value)
result = self._output_format.format(value)
if result.startswith("array"):
raise RuntimeError()
if self._output_format[2] == "r" and result.endswith(".0"):
result = result[:-2]
return result
elif np.isnan(value):
return "NaN"
elif np.isposinf(value):
return "+InF"
elif np.isneginf(value):
return "-InF"
# Should never be reached: every float is finite, NaN, or +/-Inf
vo_raise(f"Invalid floating point value '{value}'")
def binoutput(self, value, mask):
if mask:
return self._null_binoutput
value = _ensure_bigendian(value)
return value.tobytes()
def _filter_nan(self, value, mask):
return np.where(mask, np.nan, value)
def _filter_null(self, value, mask):
return np.where(mask, self.null, value)
class Double(FloatingPoint):
"""
Handles the double datatype. Double-precision IEEE
floating-point.
"""
format = "f8"
class Float(FloatingPoint):
"""
Handles the float datatype. Single-precision IEEE floating-point.
"""
format = "f4"
class Integer(Numeric):
"""
The base class for all the integral datatypes.
"""
default = 0
def __init__(self, field, config=None, pos=None):
Numeric.__init__(self, field, config, pos)
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
mask = False
if isinstance(value, str):
value = value.lower()
if value == "":
if config["version_1_3_or_later"]:
mask = True
else:
warn_or_raise(W49, W49, (), config, pos)
if self.null is not None:
value = self.null
else:
value = self.default
elif value == "nan":
mask = True
if self.null is None:
warn_or_raise(W31, W31, (), config, pos)
value = self.default
else:
value = self.null
elif value.startswith("0x"):
value = int(value[2:], 16)
else:
value = int(value, 10)
else:
value = int(value)
if self.null is not None and value == self.null:
mask = True
if value < self.val_range[0]:
warn_or_raise(W51, W51, (value, self.bit_size), config, pos)
value = self.val_range[0]
elif value > self.val_range[1]:
warn_or_raise(W51, W51, (value, self.bit_size), config, pos)
value = self.val_range[1]
return value, mask
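# Worked example (comments only) for a 16-bit Short field with no null value
# configured: hexadecimal input is accepted, out-of-range values are clipped
# to val_range (warning W51), and "nan" is masked (warning W31):
#
#     parse("0x7fff") -> (32767, False)
#     parse("40000")  -> (32767, False)   # clipped to val_range[1]
#     parse("nan")    -> (0, True)        # masked; default 0 is used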
def output(self, value, mask):
if mask:
if self.null is None:
warn_or_raise(W31, W31)
return "NaN"
return str(self.null)
return str(value)
def binoutput(self, value, mask):
if mask:
if self.null is None:
vo_raise(W31)
else:
value = self.null
value = _ensure_bigendian(value)
return value.tobytes()
def filter_array(self, value, mask):
if np.any(mask):
if self.null is not None:
return np.where(mask, self.null, value)
else:
vo_raise(W31)
return value
class UnsignedByte(Integer):
"""
Handles the unsignedByte datatype. Unsigned 8-bit integer.
"""
format = "u1"
val_range = (0, 255)
bit_size = "8-bit unsigned"
class Short(Integer):
"""
Handles the short datatype. Signed 16-bit integer.
"""
format = "i2"
val_range = (-32768, 32767)
bit_size = "16-bit"
class Int(Integer):
"""
Handles the int datatype. Signed 32-bit integer.
"""
format = "i4"
val_range = (-2147483648, 2147483647)
bit_size = "32-bit"
class Long(Integer):
"""
Handles the long datatype. Signed 64-bit integer.
"""
format = "i8"
val_range = (-9223372036854775808, 9223372036854775807)
bit_size = "64-bit"
class ComplexArrayVarArray(VarArray):
"""
Handles an array of variable-length arrays of complex numbers.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == "":
return ma.array([]), True
parts = self._splitter(value, config, pos)
items = self._base._items
parse_parts = self._base.parse_parts
if len(parts) % items != 0:
vo_raise(E02, (items, len(parts)), config, pos)
result = []
result_mask = []
for i in range(0, len(parts), items):
value, mask = parse_parts(parts[i : i + items], config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class ComplexVarArray(VarArray):
"""
Handles a variable-length array of complex numbers.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == "":
return ma.array([]), True
parts = self._splitter(value, config, pos)
parse_parts = self._base.parse_parts
result = []
result_mask = []
for i in range(0, len(parts), 2):
value = [float(x) for x in parts[i : i + 2]]
value, mask = parse_parts(value, config, pos)
result.append(value)
result_mask.append(mask)
return (
_make_masked_array(np.array(result, dtype=self._base.format), result_mask),
False,
)
class ComplexArray(NumericArray):
"""
Handles a fixed-size array of complex numbers.
"""
vararray_type = ComplexArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
NumericArray.__init__(self, field, base, arraysize, config, pos)
self._items *= 2
def parse(self, value, config=None, pos=None):
parts = self._splitter(value, config, pos)
if parts == [""]:
parts = []
return self.parse_parts(parts, config, pos)
def parse_parts(self, parts, config=None, pos=None):
if len(parts) != self._items:
vo_raise(E02, (self._items, len(parts)), config, pos)
base_parse = self._base.parse_parts
result = []
result_mask = []
for i in range(0, self._items, 2):
value = [float(x) for x in parts[i : i + 2]]
value, mask = base_parse(value, config, pos)
result.append(value)
result_mask.append(mask)
result = np.array(result, dtype=self._base.format).reshape(self._arraysize)
result_mask = np.array(result_mask, dtype="bool").reshape(self._arraysize)
return result, result_mask
class Complex(FloatingPoint, Array):
"""
The base class for complex numbers.
"""
array_type = ComplexArray
vararray_type = ComplexVarArray
default = np.nan
def __init__(self, field, config=None, pos=None):
FloatingPoint.__init__(self, field, config, pos)
Array.__init__(self, field, config, pos)
def parse(self, value, config=None, pos=None):
stripped = value.strip()
if stripped == "" or stripped.lower() == "nan":
return np.nan, True
splitter = self._splitter
parts = [float(x) for x in splitter(value, config, pos)]
if len(parts) != 2:
vo_raise(E03, (value,), config, pos)
return self.parse_parts(parts, config, pos)
_parse_permissive = parse
_parse_pedantic = parse
def parse_parts(self, parts, config=None, pos=None):
value = complex(*parts)
return value, self.is_null(value)
def output(self, value, mask):
if mask:
if self.null is None:
return "NaN"
else:
value = self.null
real = self._output_format.format(float(value.real))
imag = self._output_format.format(float(value.imag))
if self._output_format[2] == "r":
if real.endswith(".0"):
real = real[:-2]
if imag.endswith(".0"):
imag = imag[:-2]
return real + " " + imag
class FloatComplex(Complex):
"""
Handle floatComplex datatype. Pair of single-precision IEEE
floating-point numbers.
"""
format = "c8"
class DoubleComplex(Complex):
"""
Handle doubleComplex datatype. Pair of double-precision IEEE
floating-point numbers.
"""
format = "c16"
class BitArray(NumericArray):
"""
Handles an array of bits.
"""
vararray_type = ArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
NumericArray.__init__(self, field, base, arraysize, config, pos)
self._bytes = ((self._items - 1) // 8) + 1
@staticmethod
def _splitter_pedantic(value, config=None, pos=None):
return list(re.sub(r"\s", "", value))
@staticmethod
def _splitter_lax(value, config=None, pos=None):
if "," in value:
vo_warn(W01, (), config, pos)
return list(re.sub(r"\s|,", "", value))
def output(self, value, mask):
if np.any(mask):
vo_warn(W39)
value = np.asarray(value)
mapping = {False: "0", True: "1"}
return "".join(mapping[x] for x in value.flat)
def binparse(self, read):
data = read(self._bytes)
result = bitarray_to_bool(data, self._items)
result = result.reshape(self._arraysize)
result_mask = np.zeros(self._arraysize, dtype="b1")
return result, result_mask
def binoutput(self, value, mask):
if np.any(mask):
vo_warn(W39)
return bool_to_bitarray(value)
class Bit(Converter):
"""
Handles the bit datatype.
"""
format = "b1"
array_type = BitArray
vararray_type = ScalarVarArray
default = False
binary_one = b"\x08"
binary_zero = b"\0"
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
mapping = {"1": True, "0": False}
if value is False or value.strip() == "":
if not config["version_1_3_or_later"]:
warn_or_raise(W49, W49, (), config, pos)
return False, True
else:
try:
return mapping[value], False
except KeyError:
vo_raise(E04, (value,), config, pos)
def output(self, value, mask):
if mask:
vo_warn(W39)
if value:
return "1"
else:
return "0"
def binparse(self, read):
data = read(1)
return (ord(data) & 0x8) != 0, False
def binoutput(self, value, mask):
if mask:
vo_warn(W39)
if value:
return self.binary_one
return self.binary_zero
class BooleanArray(NumericArray):
"""
Handles an array of boolean values.
"""
vararray_type = ArrayVarArray
def binparse(self, read):
data = read(self._items)
binparse = self._base.binparse_value
result = []
result_mask = []
for char in data:
value, mask = binparse(char)
result.append(value)
result_mask.append(mask)
result = np.array(result, dtype="b1").reshape(self._arraysize)
result_mask = np.array(result_mask, dtype="b1").reshape(self._arraysize)
return result, result_mask
def binoutput(self, value, mask):
binoutput = self._base.binoutput
value = np.asarray(value)
mask = np.asarray(mask)
result = [binoutput(x, m) for x, m in np.broadcast(value.flat, mask.flat)]
return _empty_bytes.join(result)
class Boolean(Converter):
"""
Handles the boolean datatype.
"""
format = "b1"
array_type = BooleanArray
vararray_type = ScalarVarArray
default = False
binary_question_mark = b"?"
binary_true = b"T"
binary_false = b"F"
def parse(self, value, config=None, pos=None):
if value == "":
return False, True
if value is False:
return False, True
mapping = {
"TRUE": (True, False),
"FALSE": (False, False),
"1": (True, False),
"0": (False, False),
"T": (True, False),
"F": (False, False),
"\0": (False, True),
" ": (False, True),
"?": (False, True),
"": (False, True),
}
try:
return mapping[value.upper()]
except KeyError:
vo_raise(E05, (value,), config, pos)
def output(self, value, mask):
if mask:
return "?"
if value:
return "T"
return "F"
def binparse(self, read):
value = ord(read(1))
return self.binparse_value(value)
_binparse_mapping = {
ord("T"): (True, False),
ord("t"): (True, False),
ord("1"): (True, False),
ord("F"): (False, False),
ord("f"): (False, False),
ord("0"): (False, False),
ord("\0"): (False, True),
ord(" "): (False, True),
ord("?"): (False, True),
}
def binparse_value(self, value):
try:
return self._binparse_mapping[value]
except KeyError:
vo_raise(E05, (value,))
def binoutput(self, value, mask):
if mask:
return self.binary_question_mark
if value:
return self.binary_true
return self.binary_false
converter_mapping = {
"double": Double,
"float": Float,
"bit": Bit,
"boolean": Boolean,
"unsignedByte": UnsignedByte,
"short": Short,
"int": Int,
"long": Long,
"floatComplex": FloatComplex,
"doubleComplex": DoubleComplex,
"char": Char,
"unicodeChar": UnicodeChar,
}
def get_converter(field, config=None, pos=None):
"""
Get an appropriate converter instance for a given field.
Parameters
----------
    field : astropy.io.votable.tree.Field
        The FIELD (or PARAM) element to create a converter for.
config : dict, optional
Parser configuration dictionary
pos : tuple
Position in the input XML file. Used for error messages.
Returns
-------
converter : astropy.io.votable.converters.Converter
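
    Examples
    --------
    A minimal usage sketch (the field definition here is hypothetical)::

        from astropy.io.votable.tree import VOTableFile, Field

        votable = VOTableFile()
        field = Field(votable, name="flux", ID="flux", datatype="double")
        converter = get_converter(field)
        converter.parse("1.5")        # -> (1.5, False)
        converter.output(1.5, False)  # -> '1.5'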
"""
if config is None:
config = {}
if field.datatype not in converter_mapping:
vo_raise(E06, (field.datatype, field.ID), config)
cls = converter_mapping[field.datatype]
converter = cls(field, config, pos)
arraysize = field.arraysize
    # With numeric datatypes, array-valued fields need to be wrapped in
    # the appropriate array converter (and, when the arraysize ends in
    # '*', in a variable-length array converter as well).
if field.datatype not in ("char", "unicodeChar") and arraysize is not None:
if arraysize[-1] == "*":
arraysize = arraysize[:-1]
last_x = arraysize.rfind("x")
if last_x == -1:
arraysize = ""
else:
arraysize = arraysize[:last_x]
fixed = False
else:
fixed = True
if arraysize != "":
arraysize = [int(x) for x in arraysize.split("x")]
arraysize.reverse()
else:
arraysize = []
if arraysize != []:
converter = converter.array_type(field, converter, arraysize, config)
if not fixed:
converter = converter.vararray_type(field, converter, arraysize, config)
return converter
numpy_dtype_to_field_mapping = {
np.float64().dtype.num: "double",
np.float32().dtype.num: "float",
np.bool_().dtype.num: "bit",
np.uint8().dtype.num: "unsignedByte",
np.int16().dtype.num: "short",
np.int32().dtype.num: "int",
np.int64().dtype.num: "long",
np.complex64().dtype.num: "floatComplex",
np.complex128().dtype.num: "doubleComplex",
np.unicode_().dtype.num: "unicodeChar",
}
numpy_dtype_to_field_mapping[np.bytes_().dtype.num] = "char"
def _all_matching_dtype(column):
first_dtype = False
first_shape = ()
for x in column:
if not isinstance(x, np.ndarray) or len(x) == 0:
continue
if first_dtype is False:
first_dtype = x.dtype
first_shape = x.shape[1:]
elif first_dtype != x.dtype:
return False, ()
elif first_shape != x.shape[1:]:
first_shape = ()
return first_dtype, first_shape
def numpy_to_votable_dtype(dtype, shape):
"""
    Converts a numpy dtype and shape to a dictionary of attributes for
    a VOTable FIELD element that correspond to that type.
Parameters
----------
dtype : Numpy dtype instance
shape : tuple
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
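
    Examples
    --------
    A small sketch (the dtypes are chosen arbitrarily)::

        import numpy as np

        numpy_to_votable_dtype(np.dtype("float64"), ())
        # -> {'datatype': 'double'}
        numpy_to_votable_dtype(np.dtype("int32"), (3, 2))
        # -> {'datatype': 'int', 'arraysize': '3x2'}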
"""
if dtype.num not in numpy_dtype_to_field_mapping:
raise TypeError(f"{dtype!r} can not be represented in VOTable")
if dtype.char == "S":
return {"datatype": "char", "arraysize": str(dtype.itemsize)}
elif dtype.char == "U":
return {"datatype": "unicodeChar", "arraysize": str(dtype.itemsize // 4)}
else:
result = {"datatype": numpy_dtype_to_field_mapping[dtype.num]}
if len(shape):
result["arraysize"] = "x".join(str(x) for x in shape)
return result
def table_column_to_votable_datatype(column):
"""
Given a `astropy.table.Column` instance, returns the attributes
necessary to create a VOTable FIELD element that corresponds to
the type of the column.
    This necessarily must perform some heuristics to determine the
    type of variable-length array fields, since they are not directly
    supported by Numpy.
If the column has dtype of "object", it performs the following
tests:
- If all elements are byte or unicode strings, it creates a
variable-length byte or unicode field, respectively.
    - If all elements are numpy arrays of the same dtype and with a
      consistent shape in all but the first dimension, it creates a
      variable-length array of fixed-size arrays. If the dtypes
      match, but the shapes do not, a variable-length array is
      created.
If the dtype of the input is not understood, it sets the data type
to the most inclusive: a variable length unicodeChar array.
Parameters
----------
column : `astropy.table.Column` instance
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
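
    Examples
    --------
    A brief sketch (the column is hypothetical)::

        from astropy.table import Column

        col = Column([1.0, 2.0, 3.0], name="flux")
        table_column_to_votable_datatype(col)
        # -> {'datatype': 'double'}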
"""
votable_string_dtype = None
if column.info.meta is not None:
votable_string_dtype = column.info.meta.get("_votable_string_dtype")
if column.dtype.char == "O":
if votable_string_dtype is not None:
return {"datatype": votable_string_dtype, "arraysize": "*"}
elif isinstance(column[0], np.ndarray):
dtype, shape = _all_matching_dtype(column)
if dtype is not False:
result = numpy_to_votable_dtype(dtype, shape)
if "arraysize" not in result:
result["arraysize"] = "*"
else:
result["arraysize"] += "*"
return result
# All bets are off, do the most generic thing
return {"datatype": "unicodeChar", "arraysize": "*"}
# For fixed size string columns, datatype here will be unicodeChar,
# but honor the original FIELD datatype if present.
result = numpy_to_votable_dtype(column.dtype, column.shape[1:])
if result["datatype"] == "unicodeChar" and votable_string_dtype == "char":
result["datatype"] = "char"
return result
fcd6dff09ce82061cf2c9dfba64488a37ded7eaf8bac50bdc877b8ad757618fc
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from astropy.io import registry as io_registry
from astropy.table import Table
from astropy.table.column import BaseColumn
from astropy.units import Quantity
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import from_table, parse
from .tree import Table as VOTable
from .tree import VOTableFile
def is_votable(origin, filepath, fileobj, *args, **kwargs):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
    origin : str
        Either ``"read"`` or ``"write"``, as supplied by the unified I/O
        registry.
    filepath : str or None
        The path of the file to check, if available.
    fileobj : readable file-like or None
        A file object containing a VOTABLE_ xml file, if available.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
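
    Examples
    --------
    This identifier is not normally called directly; it is registered with
    the unified I/O interface, which uses it to recognize VOTable input
    (the file name below is hypothetical)::

        from astropy.table import Table

        t = Table.read("catalog.vot")  # format auto-detected via this identifier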
"""
from . import is_votable
if origin == "read":
if fileobj is not None:
try:
result = is_votable(fileobj)
finally:
fileobj.seek(0)
return result
elif filepath is not None:
return is_votable(filepath)
return isinstance(args[0], (VOTableFile, VOTable))
else:
return False
def read_table_votable(
input, table_id=None, use_names_over_ids=False, verify=None, **kwargs
):
"""
    Read a Table object from a VO table file.
Parameters
----------
input : str or `~astropy.io.votable.tree.VOTableFile` or `~astropy.io.votable.tree.Table`
If a string, the filename to read the table from. If a
:class:`~astropy.io.votable.tree.VOTableFile` or
:class:`~astropy.io.votable.tree.Table` object, the object to extract
the table from.
table_id : str or int, optional
The table to read in. If a `str`, it is an ID corresponding
to the ID of the table in the file (not all VOTable files
assign IDs to their tables). If an `int`, it is the index of
the table in the file, starting at 0.
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the names
of columns in the `~astropy.table.Table` instance. Since names
are not guaranteed to be unique, this may cause some columns
to be renamed by appending numbers to the end. Otherwise
(default), use the ID attributes as the column names.
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to ``'ignore'``.
**kwargs
Additional keyword arguments are passed on to
:func:`astropy.io.votable.table.parse`.
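
    Examples
    --------
    This reader is normally invoked through the unified I/O interface
    (the file name and table ID here are hypothetical)::

        from astropy.table import Table

        t = Table.read("catalog.vot", format="votable", table_id="results")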
"""
if not isinstance(input, (VOTableFile, VOTable)):
input = parse(input, table_id=table_id, verify=verify, **kwargs)
# Parse all table objects
table_id_mapping = dict()
tables = []
if isinstance(input, VOTableFile):
for table in input.iter_tables():
if table.ID is not None:
table_id_mapping[table.ID] = table
tables.append(table)
if len(tables) > 1:
if table_id is None:
raise ValueError(
"Multiple tables found: table id should be set via the table_id="
" argument. The available tables are"
f" {', '.join(table_id_mapping)}, or integers less than"
f" {len(tables)}."
)
elif isinstance(table_id, str):
if table_id in table_id_mapping:
table = table_id_mapping[table_id]
else:
raise ValueError(f"No tables with id={table_id} found")
elif isinstance(table_id, int):
if table_id < len(tables):
table = tables[table_id]
else:
raise IndexError(
f"Table index {table_id} is out of range. {len(tables)} tables"
" found"
)
elif len(tables) == 1:
table = tables[0]
else:
raise ValueError("No table found")
elif isinstance(input, VOTable):
table = input
# Convert to an astropy.table.Table object
return table.to_table(use_names_over_ids=use_names_over_ids)
def write_table_votable(
input, output, table_id=None, overwrite=False, tabledata_format=None
):
"""
    Write a Table object to a VO table file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
table_id : str, optional
The table ID to use. If this is not specified, the 'ID' keyword in the
``meta`` object of the table will be used.
overwrite : bool, optional
Whether to overwrite any existing file without warning.
tabledata_format : str, optional
The format of table data to write. Must be one of ``tabledata``
(text representation), ``binary`` or ``binary2``. Default is
``tabledata``. See :ref:`astropy:votable-serialization`.
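
    Examples
    --------
    This writer is normally invoked through the unified I/O interface
    (the output path is hypothetical)::

        from astropy.table import Table

        t = Table({"ra": [10.68, 83.82], "dec": [41.27, -5.39]})
        t.write("catalog.vot", format="votable",
                tabledata_format="binary2", overwrite=True)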
"""
# Only those columns which are instances of BaseColumn or Quantity can be written
unsupported_cols = input.columns.not_isinstance((BaseColumn, Quantity))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names} to VOTable"
)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Create a new VOTable file
table_file = from_table(input, table_id=table_id)
# Write out file
table_file.to_xml(output, tabledata_format=tabledata_format)
io_registry.register_reader("votable", Table, read_table_votable)
io_registry.register_writer("votable", Table, write_table_votable)
io_registry.register_identifier("votable", Table, is_votable)
2eef0ff6c21da81751112a5e5d6a53da7530c619bc9dafc8486a2211a2bbb181
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: Test FITS parsing
# STDLIB
import base64
import codecs
import gzip
import io
import re
import urllib.request
import warnings
# THIRD-PARTY
import numpy as np
from numpy import ma
# LOCAL
from astropy import __version__ as astropy_version
from astropy.io import fits
from astropy.utils.collections import HomogeneousList
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.xml.writer import XMLWriter
from . import converters
from . import ucd as ucd_mod
from . import util, xmlutil
from .exceptions import (
E06,
E08,
E09,
E10,
E11,
E12,
E13,
E15,
E16,
E17,
E18,
E19,
E20,
E21,
E22,
E23,
E25,
W06,
W07,
W08,
W09,
W10,
W11,
W12,
W13,
W15,
W17,
W18,
W19,
W20,
W21,
W22,
W26,
W27,
W28,
W29,
W32,
W33,
W35,
W36,
W37,
W38,
W40,
W41,
W42,
W43,
W44,
W45,
W50,
W52,
W53,
W54,
vo_raise,
vo_reraise,
vo_warn,
warn_or_raise,
warn_unknown_attrs,
)
try:
from . import tablewriter
_has_c_tabledata_writer = True
except ImportError:
_has_c_tabledata_writer = False
__all__ = [
"Link",
"Info",
"Values",
"Field",
"Param",
"CooSys",
"TimeSys",
"FieldRef",
"ParamRef",
"Group",
"Table",
"Resource",
"VOTableFile",
"Element",
]
# The default number of rows to read in each chunk before converting
# to an array.
DEFAULT_CHUNK_SIZE = 256
RESIZE_AMOUNT = 1.5
######################################################################
# FACTORY FUNCTIONS
def _resize(masked, new_size):
"""
    Masked arrays can not be resized in place, and `np.resize` and
    `ma.resize` are both incompatible with structured arrays.
    Therefore, we allocate a new masked array of the requested size
    and copy the existing rows into it.
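
    For example (a small sketch)::

        from numpy import ma

        arr = ma.zeros((2,), dtype=[("a", "f8"), ("b", "i4")])
        bigger = _resize(arr, 5)   # len(bigger) == 5, first two rows copied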
"""
new_array = ma.zeros((new_size,), dtype=masked.dtype)
length = min(len(masked), new_size)
new_array[:length] = masked[:length]
return new_array
def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):
"""
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
Should be `True` if the attribute is unique and therefore this
should return only one value. Otherwise, returns a list of
values.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute.
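
    Examples
    --------
    A sketch of how the element classes later in this module use it (the
    exact call shown here is illustrative)::

        get_field_by_id = _lookup_by_attr_factory(
            "ID", True, "iter_fields_and_params", "FIELD",
            "Looks up a FIELD or PARAM element by the given ID.")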
"""
def lookup_by_attr(self, ref, before=None):
"""
Given a string *ref*, finds the first element in the iterator
where the given attribute == *ref*. If *before* is provided,
will stop searching at the object *before*. This is
important, since "forward references" are not allowed in the
VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if getattr(element, attr, None) == ref:
vo_raise(
f"{element_name} references itself",
element._config,
element._pos,
KeyError,
)
break
if getattr(element, attr, None) == ref:
yield element
def lookup_by_attr_unique(self, ref, before=None):
for element in lookup_by_attr(self, ref, before=before):
return element
raise KeyError(
"No {} with {} '{}' found before the referencing {}".format(
element_name, attr, ref, element_name
)
)
if unique:
lookup_by_attr_unique.__doc__ = doc
return lookup_by_attr_unique
else:
lookup_by_attr.__doc__ = doc
return lookup_by_attr
def _lookup_by_id_or_name_factory(iterator, element_name, doc):
"""
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes.
"""
def lookup_by_id_or_name(self, ref, before=None):
"""
        Given a key *ref*, finds the first element in the iterator
with the attribute ID == *ref* or name == *ref*. If *before*
is provided, will stop searching at the object *before*. This
is important, since "forward references" are not allowed in
the VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if ref in (element.ID, element.name):
vo_raise(
f"{element_name} references itself",
element._config,
element._pos,
KeyError,
)
break
if ref in (element.ID, element.name):
return element
raise KeyError(
"No {} with ID or name '{}' found before the referencing {}".format(
element_name, ref, element_name
)
)
lookup_by_id_or_name.__doc__ = doc
return lookup_by_id_or_name
def _get_default_unit_format(config):
"""
Get the default unit format as specified in the VOTable spec.
"""
# The unit format changed between VOTable versions 1.3 and 1.4,
# see issue #10791.
if config["version_1_4_or_later"]:
return "vounit"
else:
return "cds"
def _get_unit_format(config):
"""
Get the unit format based on the configuration.
"""
if config.get("unit_format") is None:
format = _get_default_unit_format(config)
else:
format = config["unit_format"]
return format
######################################################################
# ATTRIBUTE CHECKERS
def check_astroyear(year, field, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
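
    Examples
    --------
    A short sketch::

        check_astroyear("J2000", "equinox")   # True
        check_astroyear("2000AD", "equinox")  # False (W07 may be emitted)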
"""
if year is not None and re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None:
warn_or_raise(W07, W07, (field, year), config, pos)
return False
return True
def check_string(string, attr_name, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string or Unicode string.
Parameters
----------
    string : str
        The string to check
    attr_name : str
        The name of the attribute this string was found in (used for
        error message)
config, pos : optional
Information about the source of the value
"""
if string is not None and not isinstance(string, str):
warn_or_raise(W08, W08, attr_name, config, pos)
return False
return True
def resolve_id(ID, id, config=None, pos=None):
if ID is None and id is not None:
warn_or_raise(W09, W09, (), config, pos)
return id
return ID
def check_ucd(ucd, config=None, pos=None):
"""
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value
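
    Examples
    --------
    A short sketch (the UCD strings are arbitrary)::

        config = {"version_1_1_or_later": True}
        check_ucd("phot.flux;em.radio", config)  # True
        check_ucd("this is not a ucd", config)   # False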
"""
if config is None:
config = {}
if config.get("version_1_1_or_later"):
try:
ucd_mod.parse_ucd(
ucd,
check_controlled_vocabulary=config.get("version_1_2_or_later", False),
has_colon=config.get("version_1_2_or_later", False),
)
except ValueError as e:
# This weird construction is for Python 3 compatibility
if config.get("verify", "ignore") == "exception":
vo_raise(W06, (ucd, str(e)), config, pos)
elif config.get("verify", "ignore") == "warn":
vo_warn(W06, (ucd, str(e)), config, pos)
return False
else:
return False
return True
######################################################################
# PROPERTY MIXINS
class _IDProperty:
@property
def ID(self):
"""
The XML ID_ of the element. May be `None` or a string
conforming to XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@ID.deleter
def ID(self):
self._ID = None
class _NameProperty:
@property
def name(self):
"""An optional name for the element."""
return self._name
@name.setter
def name(self, name):
xmlutil.check_token(name, "name", self._config, self._pos)
self._name = name
@name.deleter
def name(self):
self._name = None
class _XtypeProperty:
@property
def xtype(self):
"""Extended data type information."""
return self._xtype
@xtype.setter
def xtype(self, xtype):
if xtype is not None and not self._config.get("version_1_2_or_later"):
warn_or_raise(
W28, W28, ("xtype", self._element_name, "1.2"), self._config, self._pos
)
check_string(xtype, "xtype", self._config, self._pos)
self._xtype = xtype
@xtype.deleter
def xtype(self):
self._xtype = None
class _UtypeProperty:
_utype_in_v1_2 = False
@property
def utype(self):
"""The usage-specific or `unique type`_ of the element."""
return self._utype
@utype.setter
def utype(self, utype):
if (
self._utype_in_v1_2
and utype is not None
and not self._config.get("version_1_2_or_later")
):
warn_or_raise(
W28, W28, ("utype", self._element_name, "1.2"), self._config, self._pos
)
check_string(utype, "utype", self._config, self._pos)
self._utype = utype
@utype.deleter
def utype(self):
self._utype = None
class _UcdProperty:
_ucd_in_v1_2 = False
@property
def ucd(self):
"""The `unified content descriptor`_ for the element."""
return self._ucd
@ucd.setter
def ucd(self, ucd):
if ucd is not None and ucd.strip() == "":
ucd = None
if ucd is not None:
if self._ucd_in_v1_2 and not self._config.get("version_1_2_or_later"):
warn_or_raise(
W28,
W28,
("ucd", self._element_name, "1.2"),
self._config,
self._pos,
)
check_ucd(ucd, self._config, self._pos)
self._ucd = ucd
@ucd.deleter
def ucd(self):
self._ucd = None
class _DescriptionProperty:
@property
def description(self):
"""
An optional string describing the element. Corresponds to the
DESCRIPTION_ element.
"""
return self._description
@description.setter
def description(self, description):
self._description = description
@description.deleter
def description(self):
self._description = None
######################################################################
# ELEMENT CLASSES
class Element:
"""
A base class for all classes that represent XML elements in the
VOTABLE file.
"""
_element_name = ""
_attr_list = []
def _add_unknown_tag(self, iterator, tag, data, config, pos):
warn_or_raise(W10, W10, tag, config, pos)
def _ignore_add(self, iterator, tag, data, config, pos):
warn_unknown_attrs(tag, data.keys(), config, pos)
def _add_definitions(self, iterator, tag, data, config, pos):
if config.get("version_1_1_or_later"):
warn_or_raise(W22, W22, (), config, pos)
warn_unknown_attrs(tag, data.keys(), config, pos)
def parse(self, iterator, config):
"""
For internal use. Parse the XML content of the children of the
element.
Parameters
----------
iterator : xml iterable
An iterator over XML elements as returned by
`~astropy.utils.xml.iterparser.get_xml_iterator`.
config : dict
The configuration dictionary that affects how certain
elements are read.
Returns
-------
self : `~astropy.io.votable.tree.Element`
Returns self as a convenience.
"""
raise NotImplementedError()
def to_xml(self, w, **kwargs):
"""
For internal use. Output the element to XML.
Parameters
----------
w : astropy.utils.xml.writer.XMLWriter object
An XML writer to write to.
**kwargs : dict
Any configuration parameters to control the output.
"""
raise NotImplementedError()
class SimpleElement(Element):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
Element.__init__(self)
def __repr__(self):
buff = io.StringIO()
SimpleElement.to_xml(self, XMLWriter(buff))
return buff.getvalue().strip()
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name, attrib=w.object_attrs(self, self._attr_list))
class SimpleElementWithContent(SimpleElement):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
SimpleElement.__init__(self)
self._content = None
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
if data:
self.content = data
break
return self
def to_xml(self, w, **kwargs):
w.element(
self._element_name,
self._content,
attrib=w.object_attrs(self, self._attr_list),
)
@property
def content(self):
"""The content of the element."""
return self._content
@content.setter
def content(self, content):
check_string(content, "content", self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
class Link(SimpleElement, _IDProperty):
"""
LINK_ elements: used to reference external documents and servers through a URI.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = [
"ID",
"content_role",
"content_type",
"title",
"value",
"href",
"action",
]
_element_name = "LINK"
def __init__(
self,
ID=None,
title=None,
value=None,
href=None,
action=None,
id=None,
config=None,
pos=None,
**kwargs,
):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
content_role = kwargs.get("content-role") or kwargs.get("content_role")
content_type = kwargs.get("content-type") or kwargs.get("content_type")
if "gref" in kwargs:
warn_or_raise(W11, W11, (), config, pos)
self.ID = resolve_id(ID, id, config, pos)
self.content_role = content_role
self.content_type = content_type
self.title = title
self.value = value
self.href = href
self.action = action
warn_unknown_attrs(
"LINK",
kwargs.keys(),
config,
pos,
["content-role", "content_role", "content-type", "content_type", "gref"],
)
@property
def content_role(self):
"""Defines the MIME role of the referenced object.
Must be one of:
None, 'query', 'hints', 'doc', 'location' or 'type'
"""
return self._content_role
@content_role.setter
def content_role(self, content_role):
        if (
            content_role == "type" and not self._config["version_1_3_or_later"]
        ) or content_role not in (None, "query", "hints", "doc", "location", "type"):
vo_warn(W45, (content_role,), self._config, self._pos)
self._content_role = content_role
@content_role.deleter
def content_role(self):
self._content_role = None
@property
def content_type(self):
"""Defines the MIME content type of the referenced object."""
return self._content_type
@content_type.setter
def content_type(self, content_type):
xmlutil.check_mime_content_type(content_type, self._config, self._pos)
self._content_type = content_type
@content_type.deleter
def content_type(self):
self._content_type = None
@property
def href(self):
"""
A URI to an arbitrary protocol. The vo package only supports
http and anonymous ftp.
"""
return self._href
@href.setter
def href(self, href):
xmlutil.check_anyuri(href, self._config, self._pos)
self._href = href
@href.deleter
def href(self):
self._href = None
def to_table_column(self, column):
meta = {}
for key in self._attr_list:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
column.meta.setdefault("links", [])
column.meta["links"].append(meta)
@classmethod
def from_table_column(cls, d):
return cls(**d)
class Info(SimpleElementWithContent, _IDProperty, _XtypeProperty, _UtypeProperty):
"""
INFO_ elements: arbitrary key-value pairs for extensions to the standard.
The keyword arguments correspond to setting members of the same
name, documented below.
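
    Examples
    --------
    A minimal sketch::

        info = Info(name="QUERY_STATUS", value="OK")
        (info.name, info.value)   # ('QUERY_STATUS', 'OK')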
"""
_element_name = "INFO"
_attr_list_11 = ["ID", "name", "value"]
_attr_list_12 = _attr_list_11 + ["xtype", "ref", "unit", "ucd", "utype"]
_utype_in_v1_2 = True
def __init__(
self,
ID=None,
name=None,
value=None,
id=None,
xtype=None,
ref=None,
unit=None,
ucd=None,
utype=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElementWithContent.__init__(self)
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
self.value = value
self.xtype = xtype
self.ref = ref
self.unit = unit
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs("INFO", ["xtype"], config, pos)
if ref is not None:
warn_unknown_attrs("INFO", ["ref"], config, pos)
if unit is not None:
warn_unknown_attrs("INFO", ["unit"], config, pos)
if ucd is not None:
warn_unknown_attrs("INFO", ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs("INFO", ["utype"], config, pos)
warn_unknown_attrs("INFO", extra.keys(), config, pos)
@property
def name(self):
"""[*required*] The key of the key-value pair."""
return self._name
@name.setter
def name(self, name):
if name is None:
warn_or_raise(W35, W35, "name", self._config, self._pos)
xmlutil.check_token(name, "name", self._config, self._pos)
self._name = name
@property
def value(self):
"""
[*required*] The value of the key-value pair. (Always stored
as a string or unicode string).
"""
return self._value
@value.setter
def value(self, value):
if value is None:
warn_or_raise(W35, W35, "value", self._config, self._pos)
check_string(value, "value", self._config, self._pos)
self._value = value
@property
def content(self):
"""The content inside the INFO element."""
return self._content
@content.setter
def content(self, content):
check_string(content, "content", self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
@property
def ref(self):
"""
Refer to another INFO_ element by ID_, defined previously in
the document.
"""
return self._ref
@ref.setter
def ref(self, ref):
if ref is not None and not self._config.get("version_1_2_or_later"):
warn_or_raise(W28, W28, ("ref", "INFO", "1.2"), self._config, self._pos)
xmlutil.check_id(ref, "ref", self._config, self._pos)
# TODO: actually apply the reference
# if ref is not None:
# try:
# other = self._votable.get_values_by_id(ref, before=self)
# except KeyError:
# vo_raise(
# "VALUES ref='%s', which has not already been defined." %
# self.ref, self._config, self._pos, KeyError)
# self.null = other.null
# self.type = other.type
# self.min = other.min
# self.min_inclusive = other.min_inclusive
# self.max = other.max
# self.max_inclusive = other.max_inclusive
# self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the INFO_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
if not self._config.get("version_1_2_or_later"):
warn_or_raise(W28, W28, ("unit", "INFO", "1.2"), self._config, self._pos)
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(unit, format=default_format, parse_strict="silent")
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,), self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(unit, format=format, parse_strict="silent")
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if "unit" in attrib:
attrib["unit"] = self.unit.to_string("cds")
w.element(self._element_name, self._content, attrib=attrib)
class Values(Element, _IDProperty):
"""
VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.
The keyword arguments correspond to setting members of the same
name, documented below.
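
    Examples
    --------
    A sketch of how VALUES is typically attached to a FIELD (the column
    shown here is hypothetical)::

        from astropy.io.votable.tree import VOTableFile, Field

        votable = VOTableFile()
        field = Field(votable, name="quality", ID="quality", datatype="int")
        field.values.null = "-1"
        field.values.min = "0"
        field.values.max = "100"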
"""
def __init__(
self,
votable,
field,
ID=None,
null=None,
ref=None,
type="legal",
id=None,
config=None,
pos=None,
**extras,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._votable = votable
self._field = field
self.ID = resolve_id(ID, id, config, pos)
self.null = null
self._ref = ref
self.type = type
self.min = None
self.max = None
self.min_inclusive = True
self.max_inclusive = True
self._options = []
warn_unknown_attrs("VALUES", extras.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
self.to_xml(XMLWriter(buff))
return buff.getvalue().strip()
@property
def null(self):
"""
For integral datatypes, *null* is used to define the value
used for missing values.
"""
return self._null
@null.setter
def null(self, null):
if null is not None and isinstance(null, str):
try:
null_val = self._field.converter.parse_scalar(
null, self._config, self._pos
)[0]
except Exception:
warn_or_raise(W36, W36, null, self._config, self._pos)
null_val = self._field.converter.parse_scalar(
"0", self._config, self._pos
)[0]
else:
null_val = null
self._null = null_val
@null.deleter
def null(self):
self._null = None
@property
def type(self):
"""Defines the applicability of the domain defined by this VALUES_ element [*required*].
Must be one of the following strings:
- 'legal': The domain of this column applies in general to
this datatype. (default)
- 'actual': The domain of this column applies only to the
data enclosed in the parent table.
"""
return self._type
@type.setter
def type(self, type):
if type not in ("legal", "actual"):
vo_raise(E08, type, self._config, self._pos)
self._type = type
@property
def ref(self):
"""
Refer to another VALUES_ element by ID_, defined previously in
the document, for MIN/MAX/OPTION information.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
if ref is not None:
try:
other = self._votable.get_values_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ("VALUES", self.ref), self._config, self._pos)
ref = None
else:
self.null = other.null
self.type = other.type
self.min = other.min
self.min_inclusive = other.min_inclusive
self.max = other.max
self.max_inclusive = other.max_inclusive
self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def min(self):
"""
The minimum value of the domain. See :attr:`min_inclusive`.
"""
return self._min
@min.setter
def min(self, min):
if hasattr(self._field, "converter") and min is not None:
self._min = self._field.converter.parse(min)[0]
else:
self._min = min
@min.deleter
def min(self):
self._min = None
@property
def min_inclusive(self):
"""When `True`, the domain includes the minimum value."""
return self._min_inclusive
@min_inclusive.setter
def min_inclusive(self, inclusive):
if inclusive == "yes":
self._min_inclusive = True
elif inclusive == "no":
self._min_inclusive = False
else:
self._min_inclusive = bool(inclusive)
@min_inclusive.deleter
def min_inclusive(self):
self._min_inclusive = True
@property
def max(self):
"""
The maximum value of the domain. See :attr:`max_inclusive`.
"""
return self._max
@max.setter
def max(self, max):
if hasattr(self._field, "converter") and max is not None:
self._max = self._field.converter.parse(max)[0]
else:
self._max = max
@max.deleter
def max(self):
self._max = None
@property
def max_inclusive(self):
"""When `True`, the domain includes the maximum value."""
return self._max_inclusive
@max_inclusive.setter
def max_inclusive(self, inclusive):
if inclusive == "yes":
self._max_inclusive = True
elif inclusive == "no":
self._max_inclusive = False
else:
self._max_inclusive = bool(inclusive)
@max_inclusive.deleter
def max_inclusive(self):
self._max_inclusive = True
@property
def options(self):
"""
A list of string key-value tuples defining other OPTION
elements for the domain. All options are ignored -- they are
stored for round-tripping purposes only.
"""
return self._options
def parse(self, iterator, config):
if self.ref is not None:
for start, tag, data, pos in iterator:
if start:
warn_or_raise(W44, W44, tag, config, pos)
else:
if tag != "VALUES":
warn_or_raise(W44, W44, tag, config, pos)
break
else:
for start, tag, data, pos in iterator:
if start:
if tag == "MIN":
if "value" not in data:
vo_raise(E09, "MIN", config, pos)
self.min = data["value"]
self.min_inclusive = data.get("inclusive", "yes")
warn_unknown_attrs(
"MIN", data.keys(), config, pos, ["value", "inclusive"]
)
elif tag == "MAX":
if "value" not in data:
vo_raise(E09, "MAX", config, pos)
self.max = data["value"]
self.max_inclusive = data.get("inclusive", "yes")
warn_unknown_attrs(
"MAX", data.keys(), config, pos, ["value", "inclusive"]
)
elif tag == "OPTION":
if "value" not in data:
vo_raise(E09, "OPTION", config, pos)
xmlutil.check_token(data.get("name"), "name", config, pos)
self.options.append((data.get("name"), data.get("value")))
warn_unknown_attrs(
"OPTION", data.keys(), config, pos, ["value", "name"]
)
elif tag == "VALUES":
break
return self
def is_defaults(self):
"""
Are the settings on this ``VALUE`` element all the same as the
XML defaults?.
"""
# If there's nothing meaningful or non-default to write,
# don't write anything.
return (
self.ref is None
and self.null is None
and self.ID is None
and self.max is None
and self.min is None
and self.options == []
)
def to_xml(self, w, **kwargs):
def yes_no(value):
if value:
return "yes"
return "no"
if self.is_defaults():
return
if self.ref is not None:
w.element("VALUES", attrib=w.object_attrs(self, ["ref"]))
else:
with w.tag("VALUES", attrib=w.object_attrs(self, ["ID", "null", "ref"])):
if self.min is not None:
w.element(
"MIN",
value=self._field.converter.output(self.min, False),
inclusive=yes_no(self.min_inclusive),
)
if self.max is not None:
w.element(
"MAX",
value=self._field.converter.output(self.max, False),
inclusive=yes_no(self.max_inclusive),
)
for name, value in self.options:
w.element("OPTION", name=name, value=value)
def to_table_column(self, column):
# Have the ref filled in here
meta = {}
for key in ["ID", "null"]:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if self.min is not None:
meta["min"] = {"value": self.min, "inclusive": self.min_inclusive}
if self.max is not None:
meta["max"] = {"value": self.max, "inclusive": self.max_inclusive}
if len(self.options):
meta["options"] = dict(self.options)
column.meta["values"] = meta
def from_table_column(self, column):
if column.info.meta is None or "values" not in column.info.meta:
return
meta = column.info.meta["values"]
for key in ["ID", "null"]:
val = meta.get(key, None)
if val is not None:
setattr(self, key, val)
if "min" in meta:
self.min = meta["min"]["value"]
self.min_inclusive = meta["min"]["inclusive"]
if "max" in meta:
self.max = meta["max"]["value"]
self.max_inclusive = meta["max"]["inclusive"]
if "options" in meta:
self._options = list(meta["options"].items())
class Field(
SimpleElement,
_IDProperty,
_NameProperty,
_XtypeProperty,
_UtypeProperty,
_UcdProperty,
):
"""
FIELD_ element: describes the datatype of a particular column of data.
The keyword arguments correspond to setting members of the same
name, documented below.
If *ID* is provided, it is used for the column name in the
resulting recarray of the table. If no *ID* is provided, *name*
is used instead. If neither is provided, an exception will be
raised.
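
    Examples
    --------
    A minimal sketch (the column described here is hypothetical)::

        from astropy.io.votable.tree import VOTableFile, Field

        votable = VOTableFile()
        field = Field(votable, name="ra", ID="ra", datatype="double",
                      unit="deg", ucd="pos.eq.ra")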
"""
_attr_list_11 = [
"ID",
"name",
"datatype",
"arraysize",
"ucd",
"unit",
"width",
"precision",
"utype",
"ref",
]
_attr_list_12 = _attr_list_11 + ["xtype"]
_element_name = "FIELD"
def __init__(
self,
votable,
ID=None,
name=None,
datatype=None,
arraysize=None,
ucd=None,
unit=None,
width=None,
precision=None,
utype=None,
ref=None,
type=None,
id=None,
xtype=None,
config=None,
pos=None,
**extra,
):
if config is None:
if hasattr(votable, "_get_version_checks"):
config = votable._get_version_checks()
else:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs(self._element_name, ["xtype"], config, pos)
# TODO: REMOVE ME ----------------------------------------
# This is a terrible hack to support Simple Image Access
# Protocol results from https://astroarchive.noirlab.edu/ . It creates a field
# for the coordinate projection type of type "double", which
# actually contains character data. We have to hack the field
# to store character data, or we can't read it in. A warning
# will be raised when this happens.
if (
config.get("verify", "ignore") != "exception"
and name == "cprojection"
and ID == "cprojection"
and ucd == "VOX:WCS_CoordProjection"
and datatype == "double"
):
datatype = "char"
arraysize = "3"
vo_warn(W40, (), config, pos)
# ----------------------------------------
self.description = None
self._votable = votable
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
if name is None:
if self._element_name == "PARAM" and not config.get("version_1_1_or_later"):
pass
else:
warn_or_raise(W15, W15, self._element_name, config, pos)
self.name = self.ID
if self._ID is None and name is None:
vo_raise(W12, self._element_name, config, pos)
datatype_mapping = {
"string": "char",
"unicodeString": "unicodeChar",
"int16": "short",
"int32": "int",
"int64": "long",
"float32": "float",
"float64": "double",
# The following appear in some Vizier tables
"unsignedInt": "long",
"unsignedShort": "int",
}
datatype_mapping.update(config.get("datatype_mapping", {}))
if datatype in datatype_mapping:
warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]), config, pos)
datatype = datatype_mapping[datatype]
self.ref = ref
self.datatype = datatype
self.arraysize = arraysize
self.ucd = ucd
self.unit = unit
self.width = width
self.precision = precision
self.utype = utype
self.type = type
self._links = HomogeneousList(Link)
self.title = self.name
self.values = Values(self._votable, self)
self.xtype = xtype
self._setup(config, pos)
warn_unknown_attrs(self._element_name, extra.keys(), config, pos)
@classmethod
def uniqify_names(cls, fields):
"""
Make sure that all names and titles in a list of fields are
unique, by appending numbers if necessary.
"""
unique = {}
for field in fields:
i = 2
new_id = field.ID
while new_id in unique:
new_id = field.ID + f"_{i:d}"
i += 1
if new_id != field.ID:
vo_warn(W32, (field.ID, new_id), field._config, field._pos)
field.ID = new_id
unique[new_id] = field.ID
for field in fields:
i = 2
if field.name is None:
new_name = field.ID
implicit = True
else:
new_name = field.name
implicit = False
if new_name != field.ID:
while new_name in unique:
new_name = field.name + f" {i:d}"
i += 1
if not implicit and new_name != field.name:
vo_warn(W33, (field.name, new_name), field._config, field._pos)
field._unique_name = new_name
unique[new_name] = field.name
def _setup(self, config, pos):
if self.values._ref is not None:
self.values.ref = self.values._ref
self.converter = converters.get_converter(self, config, pos)
@property
def datatype(self):
"""The datatype of the column [*required*].
Valid values (as defined by the spec) are:
'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',
'char', 'unicodeChar', 'float', 'double', 'floatComplex', or
'doubleComplex'
Many VOTABLE files in the wild use 'string' instead of 'char',
so that is also a valid option, though 'string' will always be
converted to 'char' when writing the file back out.
"""
return self._datatype
@datatype.setter
def datatype(self, datatype):
if datatype is None:
if self._config.get("version_1_1_or_later"):
warn_or_raise(E10, E10, self._element_name, self._config, self._pos)
datatype = "char"
if datatype not in converters.converter_mapping:
vo_raise(E06, (datatype, self.ID), self._config, self._pos)
self._datatype = datatype
@property
def precision(self):
"""
Along with :attr:`width`, defines the `numerical accuracy`_
associated with the data. These values are used to limit the
precision when writing floating point values back to the XML
file. Otherwise, it is purely informational -- the Numpy
recarray containing the data itself does not use this
information.
"""
return self._precision
@precision.setter
def precision(self, precision):
if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision):
vo_raise(E11, precision, self._config, self._pos)
self._precision = precision
@precision.deleter
def precision(self):
self._precision = None
@property
def width(self):
"""
Along with :attr:`precision`, defines the `numerical
accuracy`_ associated with the data. These values are used to
limit the precision when writing floating point values back to
the XML file. Otherwise, it is purely informational -- the
Numpy recarray containing the data itself does not use this
information.
"""
return self._width
@width.setter
def width(self, width):
if width is not None:
width = int(width)
if width <= 0:
vo_raise(E12, width, self._config, self._pos)
self._width = width
@width.deleter
def width(self):
self._width = None
# ref on FIELD and PARAM behave differently than elsewhere -- here
# they're just informational, such as to refer to a coordinate
# system.
@property
def ref(self):
"""
On FIELD_ elements, ref is used only for informational
purposes, for example to refer to a COOSYS_ or TIMESYS_ element.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the FIELD_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(unit, format=default_format, parse_strict="silent")
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,), self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(unit, format=format, parse_strict="silent")
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
@property
def arraysize(self):
"""
Specifies the size of the multidimensional array if this
FIELD_ contains more than a single value.
See `multidimensional arrays`_.
"""
return self._arraysize
@arraysize.setter
def arraysize(self, arraysize):
if arraysize is not None and not re.match(
r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize
):
vo_raise(E13, arraysize, self._config, self._pos)
self._arraysize = arraysize
@arraysize.deleter
def arraysize(self):
self._arraysize = None
@property
def type(self):
"""
The type attribute on FIELD_ elements is reserved for future
extensions.
"""
return self._type
@type.setter
def type(self, type):
self._type = type
@type.deleter
def type(self):
self._type = None
@property
def values(self):
"""
A :class:`Values` instance (or `None`) defining the domain
of the column.
"""
return self._values
@values.setter
def values(self, values):
assert values is None or isinstance(values, Values)
self._values = values
@values.deleter
def values(self):
self._values = None
@property
def links(self):
"""
A list of :class:`Link` instances used to reference more
details about the meaning of the FIELD_. This is purely
informational and is not used by the `astropy.io.votable`
package.
"""
return self._links
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start:
if tag == "VALUES":
self.values.__init__(
self._votable, self, config=config, pos=pos, **data
)
self.values.parse(iterator, config)
elif tag == "LINK":
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
elif tag == "DESCRIPTION":
warn_unknown_attrs("DESCRIPTION", data.keys(), config, pos)
elif tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, self._element_name, config, pos)
self.description = data or None
elif tag == self._element_name:
break
if self.description is not None:
self.title = " ".join(x.strip() for x in self.description.splitlines())
else:
self.title = self.name
self._setup(config, pos)
return self
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if "unit" in attrib:
attrib["unit"] = self.unit.to_string("cds")
with w.tag(self._element_name, attrib=attrib):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
if not self.values.is_defaults():
self.values.to_xml(w, **kwargs)
for link in self.links:
link.to_xml(w, **kwargs)
def to_table_column(self, column):
"""
Sets the attributes of a given `astropy.table.Column` instance
to match the information in this `Field`.
"""
for key in ["ucd", "width", "precision", "utype", "xtype"]:
val = getattr(self, key, None)
if val is not None:
column.meta[key] = val
if not self.values.is_defaults():
self.values.to_table_column(column)
for link in self.links:
link.to_table_column(column)
if self.description is not None:
column.description = self.description
if self.unit is not None:
# TODO: Use units framework when it's available
column.unit = self.unit
if (
isinstance(self.converter, converters.FloatingPoint)
and self.converter.output_format != "{!r:>}"
):
column.format = self.converter.output_format
elif isinstance(self.converter, converters.Char):
column.info.meta["_votable_string_dtype"] = "char"
elif isinstance(self.converter, converters.UnicodeChar):
column.info.meta["_votable_string_dtype"] = "unicodeChar"
@classmethod
def from_table_column(cls, votable, column):
"""
Restores a `Field` instance from a given
`astropy.table.Column` instance.
"""
kwargs = {}
meta = column.info.meta
if meta:
for key in ["ucd", "width", "precision", "utype", "xtype"]:
val = meta.get(key, None)
if val is not None:
kwargs[key] = val
# TODO: Use the unit framework when available
if column.info.unit is not None:
kwargs["unit"] = column.info.unit
kwargs["name"] = column.info.name
result = converters.table_column_to_votable_datatype(column)
kwargs.update(result)
field = cls(votable, **kwargs)
if column.info.description is not None:
field.description = column.info.description
field.values.from_table_column(column)
if meta and "links" in meta:
for link in meta["links"]:
field.links.append(Link.from_table_column(link))
# TODO: Parse format into precision and width
return field
class Param(Field):
"""
PARAM_ element: constant-valued columns in the data.
:class:`Param` objects are a subclass of :class:`Field`, and have
all of its methods and members. Additionally, it defines :attr:`value`.
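
    Examples
    --------
    A minimal sketch (the parameter shown is hypothetical)::

        from astropy.io.votable.tree import VOTableFile, Param

        votable = VOTableFile()
        param = Param(votable, name="exptime", datatype="float",
                      value="1500", unit="s")
        param.value   # 1500.0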
"""
_attr_list_11 = Field._attr_list_11 + ["value"]
_attr_list_12 = Field._attr_list_12 + ["value"]
_element_name = "PARAM"
def __init__(
self,
votable,
ID=None,
name=None,
value=None,
datatype=None,
arraysize=None,
ucd=None,
unit=None,
width=None,
precision=None,
utype=None,
type=None,
id=None,
config=None,
pos=None,
**extra,
):
self._value = value
Field.__init__(
self,
votable,
ID=ID,
name=name,
datatype=datatype,
arraysize=arraysize,
ucd=ucd,
unit=unit,
precision=precision,
utype=utype,
type=type,
id=id,
config=config,
pos=pos,
**extra,
)
@property
def value(self):
"""
[*required*] The constant value of the parameter. Its type is
determined by the :attr:`~Field.datatype` member.
"""
return self._value
@value.setter
def value(self, value):
if value is None:
value = ""
if isinstance(value, str):
self._value = self.converter.parse(value, self._config, self._pos)[0]
else:
self._value = value
def _setup(self, config, pos):
Field._setup(self, config, pos)
self.value = self._value
def to_xml(self, w, **kwargs):
tmp_value = self._value
self._value = self.converter.output(tmp_value, False)
# We must always have a value
if self._value is None:
self._value = ""
Field.to_xml(self, w, **kwargs)
self._value = tmp_value
class CooSys(SimpleElement):
"""
COOSYS_ element: defines a coordinate system.
The keyword arguments correspond to setting members of the same
name, documented below.
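    Examples
    --------
    A minimal sketch of attaching a coordinate system definition to a file;
    the ID, system and epoch values are illustrative only::
        >>> from astropy.io.votable.tree import CooSys, VOTableFile
        >>> votable = VOTableFile()
        >>> votable.coordinate_systems.append(
        ...     CooSys(ID="sys1", system="ICRS", epoch="J2015.5"))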
"""
_attr_list = ["ID", "equinox", "epoch", "system"]
_element_name = "COOSYS"
def __init__(
self,
ID=None,
equinox=None,
epoch=None,
system=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
# COOSYS was deprecated in 1.2 but then re-instated in 1.3
if config.get("version_1_2_or_later") and not config.get(
"version_1_3_or_later"
):
warn_or_raise(W27, W27, (), config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.equinox = equinox
self.epoch = epoch
self.system = system
warn_unknown_attrs("COOSYS", extra.keys(), config, pos)
@property
def ID(self):
"""
[*required*] The XML ID of the COOSYS_ element, used for
cross-referencing. May be `None` or a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if self._config.get("version_1_1_or_later"):
if ID is None:
vo_raise(E15, (), self._config, self._pos)
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@property
def system(self):
"""Specifies the type of coordinate system.
Valid choices are:
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', or 'geo_app'
"""
return self._system
@system.setter
def system(self, system):
if system not in (
"eq_FK4",
"eq_FK5",
"ICRS",
"ecl_FK4",
"ecl_FK5",
"galactic",
"supergalactic",
"xy",
"barycentric",
"geo_app",
):
warn_or_raise(E16, E16, system, self._config, self._pos)
self._system = system
@system.deleter
def system(self):
self._system = None
@property
def equinox(self):
"""
A parameter required to fix the equatorial or ecliptic systems
(as e.g. "J2000" as the default "eq_FK5" or "B1950" as the
default "eq_FK4").
"""
return self._equinox
@equinox.setter
def equinox(self, equinox):
check_astroyear(equinox, "equinox", self._config, self._pos)
self._equinox = equinox
@equinox.deleter
def equinox(self):
self._equinox = None
@property
def epoch(self):
"""
Specifies the epoch of the positions. It must be a string
specifying an astronomical year.
"""
return self._epoch
@epoch.setter
def epoch(self, epoch):
check_astroyear(epoch, "epoch", self._config, self._pos)
self._epoch = epoch
@epoch.deleter
def epoch(self):
self._epoch = None
class TimeSys(SimpleElement):
"""
TIMESYS_ element: defines a time system.
The keyword arguments correspond to setting members of the same
name, documented below.
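    Examples
    --------
    A minimal read-only sketch (the file name ``votable.xml`` is
    illustrative only)::
        >>> from astropy.io.votable import parse
        >>> votable = parse("votable.xml")          # doctest: +SKIP
        >>> for ts in votable.iter_timesys():       # doctest: +SKIP
        ...     print(ts.timescale, ts.refposition)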
"""
_attr_list = ["ID", "timeorigin", "timescale", "refposition"]
_element_name = "TIMESYS"
def __init__(
self,
ID=None,
timeorigin=None,
timescale=None,
refposition=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
# TIMESYS is supported starting in version 1.4
if not config["version_1_4_or_later"]:
warn_or_raise(W54, W54, config["version"], config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.timeorigin = timeorigin
self.timescale = timescale
self.refposition = refposition
warn_unknown_attrs(
"TIMESYS",
extra.keys(),
config,
pos,
["ID", "timeorigin", "timescale", "refposition"],
)
@property
def ID(self):
"""
[*required*] The XML ID of the TIMESYS_ element, used for
cross-referencing. Must be a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if ID is None:
vo_raise(E22, (), self._config, self._pos)
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@property
def timeorigin(self):
"""
Specifies the time origin of the time coordinate,
        given as a Julian Date for the time scale and
reference point defined. It is usually given as a
floating point literal; for convenience, the magic
strings "MJD-origin" (standing for 2400000.5) and
"JD-origin" (standing for 0) are also allowed.
The timeorigin attribute MUST be given unless the
time’s representation contains a year of a calendar
era, in which case it MUST NOT be present. In VOTables,
these representations currently are Gregorian calendar
years with xtype="timestamp", or years in the Julian
or Besselian calendar when a column has yr, a, or Ba as
its unit and no time origin is given.
"""
return self._timeorigin
@timeorigin.setter
def timeorigin(self, timeorigin):
if (
timeorigin is not None
and timeorigin != "MJD-origin"
and timeorigin != "JD-origin"
):
try:
timeorigin = float(timeorigin)
except ValueError:
warn_or_raise(E23, E23, timeorigin, self._config, self._pos)
self._timeorigin = timeorigin
@timeorigin.deleter
def timeorigin(self):
self._timeorigin = None
@property
def timescale(self):
"""
[*required*] String specifying the time scale used. Values
should be taken from the IVOA timescale vocabulary (documented
at http://www.ivoa.net/rdf/timescale).
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
self._timescale = timescale
@timescale.deleter
def timescale(self):
self._timescale = None
@property
def refposition(self):
"""
[*required*] String specifying the reference position. Values
should be taken from the IVOA refposition vocabulary (documented
at http://www.ivoa.net/rdf/refposition).
"""
return self._refposition
@refposition.setter
def refposition(self, refposition):
self._refposition = refposition
@refposition.deleter
def refposition(self):
self._refposition = None
class FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.
"""
_attr_list_11 = ["ref"]
_attr_list_12 = _attr_list_11 + ["ucd", "utype"]
_element_name = "FIELDref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(
self, table, ref, ucd=None, utype=None, config=None, pos=None, **extra
):
"""
*table* is the :class:`Table` object that this :class:`FieldRef`
is a member of.
*ref* is the ID to reference a :class:`Field` object defined
elsewhere.
"""
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ["utype"], config, pos)
@property
def ref(self):
"""The ID_ of the FIELD_ that this FIELDref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Lookup the :class:`Field` instance that this :class:`FieldRef`
references.
"""
for field in self._table._votable.iter_fields_and_params():
if isinstance(field, Field) and field.ID == self.ref:
return field
vo_raise(f"No field named '{self.ref}'", self._config, self._pos, KeyError)
class ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
It contains the following publicly-accessible members:
*ref*: An XML ID referring to a <PARAM> element.
"""
_attr_list_11 = ["ref"]
_attr_list_12 = _attr_list_11 + ["ucd", "utype"]
_element_name = "PARAMref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ["utype"], config, pos)
@property
def ref(self):
"""The ID_ of the PARAM_ that this PARAMref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
        Lookup the :class:`Param` instance that this :class:`ParamRef`
references.
"""
for param in self._table._votable.iter_fields_and_params():
if isinstance(param, Param) and param.ID == self.ref:
return param
vo_raise(f"No params named '{self.ref}'", self._config, self._pos, KeyError)
class Group(
Element,
_IDProperty,
_NameProperty,
_UtypeProperty,
_UcdProperty,
_DescriptionProperty,
):
"""
GROUP_ element: groups FIELD_ and PARAM_ elements.
    This information is currently ignored by the vo package---that is,
the columns in the recarray are always flat---but the grouping
information is stored so that it can be written out again to the
XML file.
The keyword arguments correspond to setting members of the same
name, documented below.
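    Examples
    --------
    A minimal sketch of inspecting the groups of a parsed table (the file
    name ``votable.xml`` is illustrative only)::
        >>> from astropy.io.votable import parse_single_table
        >>> table = parse_single_table("votable.xml")   # doctest: +SKIP
        >>> for group in table.iter_groups():           # doctest: +SKIP
        ...     print(group.ID, len(group.entries))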
"""
def __init__(
self,
table,
ID=None,
name=None,
ref=None,
ucd=None,
utype=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
self.ref = ref
self.ucd = ucd
self.utype = utype
self.description = None
self._entries = HomogeneousList((FieldRef, ParamRef, Group, Param))
warn_unknown_attrs("GROUP", extra.keys(), config, pos)
def __repr__(self):
return f"<GROUP>... {len(self._entries)} entries ...</GROUP>"
@property
def ref(self):
"""
Currently ignored, as it's not clear from the spec how this is
meant to work.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def entries(self):
"""
[read-only] A list of members of the GROUP_. This list may
only contain objects of type :class:`Param`, :class:`Group`,
:class:`ParamRef` and :class:`FieldRef`.
"""
return self._entries
def _add_fieldref(self, iterator, tag, data, config, pos):
fieldref = FieldRef(self._table, config=config, pos=pos, **data)
self.entries.append(fieldref)
def _add_paramref(self, iterator, tag, data, config, pos):
paramref = ParamRef(self._table, config=config, pos=pos, **data)
self.entries.append(paramref)
def _add_param(self, iterator, tag, data, config, pos):
if isinstance(self._table, VOTableFile):
votable = self._table
else:
votable = self._table._votable
param = Param(votable, config=config, pos=pos, **data)
self.entries.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self._table, config=config, pos=pos, **data)
self.entries.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
tag_mapping = {
"FIELDref": self._add_fieldref,
"PARAMref": self._add_paramref,
"PARAM": self._add_param,
"GROUP": self._add_group,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "GROUP", config, pos)
self.description = data or None
elif tag == "GROUP":
break
return self
def to_xml(self, w, **kwargs):
with w.tag(
"GROUP", attrib=w.object_attrs(self, ["ID", "name", "ref", "ucd", "utype"])
):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for entry in self.entries:
entry.to_xml(w, **kwargs)
def iter_fields_and_params(self):
"""
Recursively iterate over all :class:`Param` elements in this
:class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Param):
yield entry
elif isinstance(entry, Group):
yield from entry.iter_fields_and_params()
def iter_groups(self):
"""
Recursively iterate over all sub-:class:`Group` instances in
this :class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Group):
yield entry
yield from entry.iter_groups()
class Table(Element, _IDProperty, _NameProperty, _UcdProperty, _DescriptionProperty):
"""
TABLE_ element: optionally contains data.
It contains the following publicly-accessible and mutable
attribute:
*array*: A Numpy masked array of the data itself, where each
row is a row of votable data, and columns are named and typed
based on the <FIELD> elements of the table. The mask is
parallel to the data array, except for variable-length fields.
For those fields, the numpy array's column type is "object"
(``"O"``), and another masked array is stored there.
    If the Table contains no data (for example, because its enclosing
    :class:`Resource` has :attr:`~Resource.type` == 'meta'), *array*
    will have zero length.
The keyword arguments correspond to setting members of the same
name, documented below.
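    Examples
    --------
    A minimal sketch of reading the data of the first table in a file (the
    file name ``votable.xml`` is illustrative only)::
        >>> from astropy.io.votable import parse_single_table
        >>> table = parse_single_table("votable.xml")   # doctest: +SKIP
        >>> data = table.array                          # doctest: +SKIP
        >>> first_row = data[0]                         # doctest: +SKIP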
"""
def __init__(
self,
votable,
ID=None,
name=None,
ref=None,
ucd=None,
utype=None,
nrows=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
self._empty = False
Element.__init__(self)
self._votable = votable
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
xmlutil.check_id(ref, "ref", config, pos)
self._ref = ref
self.ucd = ucd
self.utype = utype
if nrows is not None:
nrows = int(nrows)
if nrows < 0:
raise ValueError("'nrows' cannot be negative.")
self._nrows = nrows
self.description = None
self.format = "tabledata"
self._fields = HomogeneousList(Field)
self._params = HomogeneousList(Param)
self._groups = HomogeneousList(Group)
self._links = HomogeneousList(Link)
self._infos = HomogeneousList(Info)
self.array = ma.array([])
warn_unknown_attrs("TABLE", extra.keys(), config, pos)
def __repr__(self):
return repr(self.to_table())
def __bytes__(self):
return bytes(self.to_table())
def __str__(self):
return str(self.to_table())
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, ref):
"""
Refer to another TABLE, previously defined, by the *ref* ID_
for all metadata (FIELD_, PARAM_ etc.) information.
"""
# When the ref changes, we want to verify that it will work
# by actually going and looking for the referenced table.
# If found, set a bunch of properties in this table based
# on the other one.
xmlutil.check_id(ref, "ref", self._config, self._pos)
if ref is not None:
try:
table = self._votable.get_table_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ("TABLE", self.ref), self._config, self._pos)
ref = None
else:
self._fields = table.fields
self._params = table.params
self._groups = table.groups
self._links = table.links
else:
del self._fields[:]
del self._params[:]
del self._groups[:]
del self._links[:]
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def format(self):
"""The serialization format of the table [*required*].
Must be one of:
'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_)
'fits' (FITS_).
Note that the 'fits' format, since it requires an external
file, can not be written out. Any file read in with 'fits'
format will be read out, by default, in 'tabledata' format.
See :ref:`astropy:votable-serialization`.
"""
return self._format
@format.setter
def format(self, format):
format = format.lower()
if format == "fits":
vo_raise(
"fits format can not be written out, only read.",
self._config,
self._pos,
NotImplementedError,
)
if format == "binary2":
if not self._config["version_1_3_or_later"]:
vo_raise(
"binary2 only supported in votable 1.3 or later",
self._config,
self._pos,
)
elif format not in ("tabledata", "binary"):
vo_raise(f"Invalid format '{format}'", self._config, self._pos)
self._format = format
@property
def nrows(self):
"""
[*immutable*] The number of rows in the table, as specified in
the XML file.
"""
return self._nrows
@property
def fields(self):
"""
A list of :class:`Field` objects describing the types of each
of the data columns.
"""
return self._fields
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
table. Must contain only :class:`Param` objects.
"""
return self._params
@property
def groups(self):
"""
A list of :class:`Group` objects describing how the columns
and parameters are grouped. Currently this information is
only kept around for round-tripping and informational
purposes.
"""
return self._groups
@property
def links(self):
"""
A list of :class:`Link` objects (pointers to other documents
or servers through a URI) for the table.
"""
return self._links
@property
def infos(self):
"""
A list of :class:`Info` objects for the table. Allows for
post-operational diagnostics.
"""
return self._infos
def is_empty(self):
"""
Returns True if this table doesn't contain any real data
because it was skipped over by the parser (through use of the
``table_number`` kwarg).
"""
return self._empty
def create_arrays(self, nrows=0, config=None):
"""
Create a new array to hold the data based on the current set
        of fields, and store it in the *array* member variable.
Any data in the existing array will be lost.
*nrows*, if provided, is the number of rows to allocate.
"""
if nrows is None:
nrows = 0
fields = self.fields
if len(fields) == 0:
array = np.recarray((nrows,), dtype="O")
mask = np.zeros((nrows,), dtype="b")
else:
# for field in fields: field._setup(config)
Field.uniqify_names(fields)
dtype = []
for x in fields:
if x._unique_name == x.ID:
id = x.ID
else:
id = (x._unique_name, x.ID)
dtype.append((id, x.converter.format))
array = np.recarray((nrows,), dtype=np.dtype(dtype))
descr_mask = []
for d in array.dtype.descr:
new_type = (d[1][1] == "O" and "O") or "bool"
if len(d) == 2:
descr_mask.append((d[0], new_type))
elif len(d) == 3:
descr_mask.append((d[0], new_type, d[2]))
mask = np.zeros((nrows,), dtype=descr_mask)
self.array = ma.array(array, mask=mask)
def _resize_strategy(self, size):
"""
Return a new (larger) size based on size, used for
reallocating an array when it fills up. This is in its own
function so the resizing strategy can be easily replaced.
"""
# Once we go beyond 0, make a big step -- after that use a
# factor of 1.5 to help keep memory usage compact
if size == 0:
return 512
return int(np.ceil(size * RESIZE_AMOUNT))
def _add_field(self, iterator, tag, data, config, pos):
field = Field(self._votable, config=config, pos=pos, **data)
self.fields.append(field)
field.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("INFO", "TABLE", "1.2"), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def parse(self, iterator, config):
columns = config.get("columns")
# If we've requested to read in only a specific table, skip
# all others
table_number = config.get("table_number")
current_table_number = config.get("_current_table_number")
skip_table = False
if current_table_number is not None:
config["_current_table_number"] += 1
if table_number is not None and table_number != current_table_number:
skip_table = True
self._empty = True
table_id = config.get("table_id")
if table_id is not None:
if table_id != self.ID:
skip_table = True
self._empty = True
if self.ref is not None:
# This table doesn't have its own datatype descriptors, it
# just references those from another table.
# This is to call the property setter to go and get the
# referenced information
self.ref = self.ref
for start, tag, data, pos in iterator:
if start:
if tag == "DATA":
warn_unknown_attrs("DATA", data.keys(), config, pos)
break
else:
if tag == "TABLE":
return self
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
else:
tag_mapping = {
"FIELD": self._add_field,
"PARAM": self._add_param,
"GROUP": self._add_group,
"LINK": self._add_link,
"INFO": self._add_info,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
if tag == "DATA":
if len(self.fields) == 0:
warn_or_raise(E25, E25, None, config, pos)
warn_unknown_attrs("DATA", data.keys(), config, pos)
break
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
elif tag == "TABLE":
# For error checking purposes
Field.uniqify_names(self.fields)
# We still need to create arrays, even if the file
# contains no DATA section
self.create_arrays(nrows=0, config=config)
return self
self.create_arrays(nrows=self._nrows, config=config)
fields = self.fields
names = [x.ID for x in fields]
# Deal with a subset of the columns, if requested.
if not columns:
colnumbers = list(range(len(fields)))
else:
if isinstance(columns, str):
columns = [columns]
columns = np.asarray(columns)
if issubclass(columns.dtype.type, np.integer):
if np.any(columns < 0) or np.any(columns > len(fields)):
raise ValueError("Some specified column numbers out of range")
colnumbers = columns
elif issubclass(columns.dtype.type, np.character):
try:
colnumbers = [names.index(x) for x in columns]
except ValueError:
raise ValueError(f"Columns '{columns}' not found in fields list")
else:
raise TypeError("Invalid columns list")
if (not skip_table) and (len(fields) > 0):
for start, tag, data, pos in iterator:
if start:
if tag == "TABLEDATA":
warn_unknown_attrs("TABLEDATA", data.keys(), config, pos)
self.array = self._parse_tabledata(
iterator, colnumbers, fields, config
)
break
elif tag == "BINARY":
warn_unknown_attrs("BINARY", data.keys(), config, pos)
self.array = self._parse_binary(
1, iterator, colnumbers, fields, config, pos
)
break
elif tag == "BINARY2":
if not config["version_1_3_or_later"]:
warn_or_raise(W52, W52, config["version"], config, pos)
self.array = self._parse_binary(
2, iterator, colnumbers, fields, config, pos
)
break
elif tag == "FITS":
warn_unknown_attrs("FITS", data.keys(), config, pos, ["extnum"])
try:
extnum = int(data.get("extnum", 0))
if extnum < 0:
raise ValueError("'extnum' cannot be negative.")
except ValueError:
vo_raise(E17, (), config, pos)
self.array = self._parse_fits(iterator, extnum, config)
break
else:
warn_or_raise(W37, W37, tag, config, pos)
break
for start, tag, data, pos in iterator:
if not start and tag == "DATA":
break
for start, tag, data, pos in iterator:
if start and tag == "INFO":
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("INFO", "TABLE", "1.2"), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
elif not start and tag == "TABLE":
break
return self
def _parse_tabledata(self, iterator, colnumbers, fields, config):
# Since we don't know the number of rows up front, we'll
# reallocate the record array to make room as we go. This
# prevents the need to scan through the XML twice. The
# allocation is by factors of 1.5.
invalid = config.get("invalid", "exception")
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
parsers = [field.converter.parse for field in fields]
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
colnumbers_bits = [i in colnumbers for i in range(len(fields))]
row_default = [x.converter.default for x in fields]
mask_default = [True] * len(fields)
array_chunk = []
mask_chunk = []
chunk_size = config.get("chunk_size", DEFAULT_CHUNK_SIZE)
for start, tag, data, pos in iterator:
if tag == "TR":
# Now parse one row
row = row_default[:]
row_mask = mask_default[:]
i = 0
for start, tag, data, pos in iterator:
if start:
binary = data.get("encoding", None) == "base64"
warn_unknown_attrs(tag, data.keys(), config, pos, ["encoding"])
else:
if tag == "TD":
if i >= len(fields):
vo_raise(E20, len(fields), config, pos)
if colnumbers_bits[i]:
try:
if binary:
rawdata = base64.b64decode(data.encode("ascii"))
buf = io.BytesIO(rawdata)
buf.seek(0)
try:
value, mask_value = binparsers[i](buf.read)
except Exception as e:
vo_reraise(
e,
config,
pos,
"(in row {:d}, col '{}')".format(
len(array_chunk), fields[i].ID
),
)
else:
try:
value, mask_value = parsers[i](
data, config, pos
)
except Exception as e:
vo_reraise(
e,
config,
pos,
"(in row {:d}, col '{}')".format(
len(array_chunk), fields[i].ID
),
)
except Exception as e:
if invalid == "exception":
vo_reraise(e, config, pos)
else:
row[i] = value
row_mask[i] = mask_value
elif tag == "TR":
break
else:
self._add_unknown_tag(iterator, tag, data, config, pos)
i += 1
if i < len(fields):
vo_raise(E21, (i, len(fields)), config, pos)
array_chunk.append(tuple(row))
mask_chunk.append(tuple(row_mask))
if len(array_chunk) == chunk_size:
while numrows + chunk_size > alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
if alloc_rows != len(array):
array = _resize(array, alloc_rows)
array[numrows : numrows + chunk_size] = array_chunk
array.mask[numrows : numrows + chunk_size] = mask_chunk
numrows += chunk_size
array_chunk = []
mask_chunk = []
elif not start and tag == "TABLEDATA":
break
# Now, resize the array to the exact number of rows we need and
# put the last chunk values in there.
alloc_rows = numrows + len(array_chunk)
array = _resize(array, alloc_rows)
array[numrows:] = array_chunk
if alloc_rows != 0:
array.mask[numrows:] = mask_chunk
numrows += len(array_chunk)
if self.nrows is not None and self.nrows >= 0 and self.nrows != numrows:
warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)
self._nrows = numrows
return array
def _get_binary_data_stream(self, iterator, config):
have_local_stream = False
for start, tag, data, pos in iterator:
if tag == "STREAM":
if start:
warn_unknown_attrs(
"STREAM",
data.keys(),
config,
pos,
["type", "href", "actuate", "encoding", "expires", "rights"],
)
if "href" not in data:
have_local_stream = True
if data.get("encoding", None) != "base64":
warn_or_raise(
W38, W38, data.get("encoding", None), config, pos
)
else:
href = data["href"]
xmlutil.check_anyuri(href, config, pos)
encoding = data.get("encoding", None)
else:
buffer = data
break
if have_local_stream:
buffer = base64.b64decode(buffer.encode("ascii"))
string_io = io.BytesIO(buffer)
string_io.seek(0)
read = string_io.read
else:
if not href.startswith(("http", "ftp", "file")):
vo_raise(
"The vo package only supports remote data through http, "
+ "ftp or file",
self._config,
self._pos,
NotImplementedError,
)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == "gzip":
fd = gzip.GzipFile(href, "rb", fileobj=fd)
elif encoding == "base64":
fd = codecs.EncodedFile(fd, "base64")
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config,
self._pos,
NotImplementedError,
)
read = fd.read
def careful_read(length):
result = read(length)
if len(result) != length:
raise EOFError
return result
return careful_read
def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):
fields = self.fields
careful_read = self._get_binary_data_stream(iterator, config)
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
while True:
# Resize result arrays if necessary
if numrows >= alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
array = _resize(array, alloc_rows)
row_data = []
row_mask_data = []
try:
if mode == 2:
mask_bits = careful_read(int((len(fields) + 7) / 8))
row_mask_data = list(
converters.bitarray_to_bool(mask_bits, len(fields))
)
# Ignore the mask for string columns (see issue 8995)
for i, f in enumerate(fields):
if row_mask_data[i] and (
f.datatype == "char" or f.datatype == "unicodeChar"
):
row_mask_data[i] = False
for i, binparse in enumerate(binparsers):
try:
value, value_mask = binparse(careful_read)
except EOFError:
raise
except Exception as e:
vo_reraise(
e,
config,
pos,
f"(in row {numrows:d}, col '{fields[i].ID}')",
)
row_data.append(value)
if mode == 1:
row_mask_data.append(value_mask)
else:
row_mask_data[i] = row_mask_data[i] or value_mask
except EOFError:
break
row = [x.converter.default for x in fields]
row_mask = [False] * len(fields)
for i in colnumbers:
row[i] = row_data[i]
row_mask[i] = row_mask_data[i]
array[numrows] = tuple(row)
array.mask[numrows] = tuple(row_mask)
numrows += 1
array = _resize(array, numrows)
return array
def _parse_fits(self, iterator, extnum, config):
for start, tag, data, pos in iterator:
if tag == "STREAM":
if start:
warn_unknown_attrs(
"STREAM",
data.keys(),
config,
pos,
["type", "href", "actuate", "encoding", "expires", "rights"],
)
href = data["href"]
encoding = data.get("encoding", None)
else:
break
if not href.startswith(("http", "ftp", "file")):
vo_raise(
"The vo package only supports remote data through http, ftp or file",
self._config,
self._pos,
NotImplementedError,
)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == "gzip":
fd = gzip.GzipFile(href, "r", fileobj=fd)
elif encoding == "base64":
fd = codecs.EncodedFile(fd, "base64")
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config,
self._pos,
NotImplementedError,
)
hdulist = fits.open(fd)
array = hdulist[int(extnum)].data
if array.dtype != self.array.dtype:
warn_or_raise(W19, W19, (), self._config, self._pos)
return array
def to_xml(self, w, **kwargs):
specified_format = kwargs.get("tabledata_format")
if specified_format is not None:
format = specified_format
else:
format = self.format
if format == "fits":
format = "tabledata"
with w.tag(
"TABLE",
attrib=w.object_attrs(self, ("ID", "name", "ref", "ucd", "utype", "nrows")),
):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.fields, self.params):
for element in element_set:
element._setup({}, None)
if self.ref is None:
for element_set in (self.fields, self.params, self.groups, self.links):
for element in element_set:
element.to_xml(w, **kwargs)
elif kwargs["version_1_2_or_later"]:
index = list(self._votable.iter_tables()).index(self)
group = Group(self, ID=f"_g{index}")
group.to_xml(w, **kwargs)
if len(self.array):
with w.tag("DATA"):
if format == "tabledata":
self._write_tabledata(w, **kwargs)
elif format == "binary":
self._write_binary(1, w, **kwargs)
elif format == "binary2":
self._write_binary(2, w, **kwargs)
if kwargs["version_1_2_or_later"]:
for element in self._infos:
element.to_xml(w, **kwargs)
def _write_tabledata(self, w, **kwargs):
fields = self.fields
array = self.array
with w.tag("TABLEDATA"):
w._flush()
if _has_c_tabledata_writer and not kwargs.get("_debug_python_based_parser"):
supports_empty_values = [
field.converter.supports_empty_values(kwargs) for field in fields
]
fields = [field.converter.output for field in fields]
indent = len(w._tags) - 1
tablewriter.write_tabledata(
w.write,
array.data,
array.mask,
fields,
supports_empty_values,
indent,
1 << 8,
)
else:
write = w.write
indent_spaces = w.get_indentation_spaces()
tr_start = indent_spaces + "<TR>\n"
tr_end = indent_spaces + "</TR>\n"
td = indent_spaces + " <TD>{}</TD>\n"
td_empty = indent_spaces + " <TD/>\n"
fields = [
(
i,
field.converter.output,
field.converter.supports_empty_values(kwargs),
)
for i, field in enumerate(fields)
]
for row in range(len(array)):
write(tr_start)
array_row = array.data[row]
mask_row = array.mask[row]
for i, output, supports_empty_values in fields:
data = array_row[i]
masked = mask_row[i]
if supports_empty_values and np.all(masked):
write(td_empty)
else:
try:
val = output(data, masked)
except Exception as e:
vo_reraise(
e,
additional="(in row {:d}, col '{}')".format(
row, self.fields[i].ID
),
)
if len(val):
write(td.format(val))
else:
write(td_empty)
write(tr_end)
def _write_binary(self, mode, w, **kwargs):
fields = self.fields
array = self.array
if mode == 1:
tag_name = "BINARY"
else:
tag_name = "BINARY2"
with w.tag(tag_name):
with w.tag("STREAM", encoding="base64"):
fields_basic = [
(i, field.converter.binoutput) for (i, field) in enumerate(fields)
]
data = io.BytesIO()
for row in range(len(array)):
array_row = array.data[row]
array_mask = array.mask[row]
if mode == 2:
flattened = np.array([np.all(x) for x in array_mask])
data.write(converters.bool_to_bitarray(flattened))
for i, converter in fields_basic:
try:
chunk = converter(array_row[i], array_mask[i])
assert type(chunk) == bytes
except Exception as e:
vo_reraise(
e, additional=f"(in row {row:d}, col '{fields[i].ID}')"
)
data.write(chunk)
w._flush()
w.write(base64.b64encode(data.getvalue()).decode("ascii"))
def to_table(self, use_names_over_ids=False):
"""
Convert this VO Table to an `astropy.table.Table` instance.
Parameters
----------
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the
names of columns in the `astropy.table.Table` instance.
Since names are not guaranteed to be unique, this may cause
some columns to be renamed by appending numbers to the end.
Otherwise (default), use the ID attributes as the column
names.
.. warning::
Variable-length array fields may not be restored
identically when round-tripping through the
`astropy.table.Table` instance.
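        Examples
        --------
        A minimal sketch (the file name ``votable.xml`` is illustrative
        only)::
            >>> from astropy.io.votable import parse_single_table
            >>> vot_table = parse_single_table("votable.xml")     # doctest: +SKIP
            >>> t = vot_table.to_table(use_names_over_ids=True)   # doctest: +SKIP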
"""
from astropy.table import Table
meta = {}
for key in ["ID", "name", "ref", "ucd", "utype", "description"]:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if use_names_over_ids:
names = [field.name for field in self.fields]
unique_names = []
for i, name in enumerate(names):
new_name = name
i = 2
while new_name in unique_names:
new_name = f"{name}{i}"
i += 1
unique_names.append(new_name)
names = unique_names
else:
names = [field.ID for field in self.fields]
table = Table(self.array, names=names, meta=meta)
for name, field in zip(names, self.fields):
column = table[name]
field.to_table_column(column)
return table
@classmethod
def from_table(cls, votable, table):
"""
Create a `Table` instance from a given `astropy.table.Table`
instance.
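        Examples
        --------
        A minimal sketch; the column names and values are illustrative
        only::
            >>> import astropy.table
            >>> from astropy.io.votable.tree import VOTableFile, Table
            >>> t = astropy.table.Table([[1.0, 2.0], [3, 4]], names=("flux", "count"))
            >>> votable = VOTableFile()
            >>> vot_table = Table.from_table(votable, t)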
"""
kwargs = {}
for key in ["ID", "name", "ref", "ucd", "utype"]:
val = table.meta.get(key)
if val is not None:
kwargs[key] = val
new_table = cls(votable, **kwargs)
if "description" in table.meta:
new_table.description = table.meta["description"]
for colname in table.colnames:
column = table[colname]
new_table.fields.append(Field.from_table_column(votable, column))
if table.mask is None:
new_table.array = ma.array(np.asarray(table))
else:
new_table.array = ma.array(np.asarray(table), mask=np.asarray(table.mask))
return new_table
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD and PARAM elements in the
TABLE.
"""
yield from self.params
yield from self.fields
for group in self.groups:
yield from group.iter_fields_and_params()
get_field_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given ID.
""",
)
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given ID or name.
""",
)
get_fields_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given utype and
returns an iterator emitting all matches.
""",
)
def iter_groups(self):
"""
Recursively iterate over all GROUP elements in the TABLE.
"""
for group in self.groups:
yield group
yield from group.iter_groups()
get_group_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_groups",
"GROUP",
"""
Looks up a GROUP element by the given ID. Used by the group's
"ref" attribute
""",
)
get_groups_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_groups",
"GROUP",
"""
Looks up a GROUP element by the given utype and returns an
iterator emitting all matches.
""",
)
def iter_info(self):
yield from self.infos
class Resource(
Element, _IDProperty, _NameProperty, _UtypeProperty, _DescriptionProperty
):
"""
RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
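    Examples
    --------
    A minimal sketch of building a file from scratch; all names, sizes and
    values are illustrative only::
        >>> from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
        >>> votable = VOTableFile()
        >>> resource = Resource()
        >>> votable.resources.append(resource)
        >>> table = Table(votable)
        >>> resource.tables.append(table)
        >>> table.fields.extend([
        ...     Field(votable, name="filename", datatype="char", arraysize="*"),
        ...     Field(votable, name="matrix", datatype="double", arraysize="2x2")])
        >>> table.create_arrays(2)
        >>> table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
        >>> table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
        >>> votable.to_xml("new_votable.xml")   # doctest: +SKIP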
"""
def __init__(
self,
name=None,
ID=None,
utype=None,
type="results",
id=None,
config=None,
pos=None,
**kwargs,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.name = name
self.ID = resolve_id(ID, id, config, pos)
self.utype = utype
self.type = type
self._extra_attributes = kwargs
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._groups = HomogeneousList(Group)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._links = HomogeneousList(Link)
self._tables = HomogeneousList(Table)
self._resources = HomogeneousList(Resource)
warn_unknown_attrs("RESOURCE", kwargs.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
w = XMLWriter(buff)
w.element(self._element_name, attrib=w.object_attrs(self, self._attr_list))
return buff.getvalue().strip()
@property
def type(self):
"""The type of the resource [*required*].
Must be either:
- 'results': This resource contains actual result values
(default)
- 'meta': This resource contains only datatype descriptions
(FIELD_ elements), but no actual data.
"""
return self._type
@type.setter
def type(self, type):
if type not in ("results", "meta"):
vo_raise(E18, type, self._config, self._pos)
self._type = type
@property
def extra_attributes(self):
"""Dictionary of extra attributes of the RESOURCE_ element.
This is dictionary of string keys to string values containing any
extra attributes of the RESOURCE_ element that are not defined
in the specification. The specification explicitly allows
for extra attributes here, but nowhere else.
"""
return self._extra_attributes
@property
def coordinate_systems(self):
"""
A list of coordinate system definitions (COOSYS_ elements) for
the RESOURCE_. Must contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system definitions (TIMESYS_ elements) for
the RESOURCE_. Must contain only `TimeSys` objects.
"""
return self._time_systems
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
resource. Must only contain `Info` objects.
"""
return self._infos
@property
def groups(self):
"""
A list of groups.
"""
return self._groups
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
resource. Must contain only `Param` objects.
"""
return self._params
@property
def links(self):
"""
A list of links (pointers to other documents or servers
through a URI) for the resource. Must contain only `Link`
objects.
"""
return self._links
@property
def tables(self):
"""
A list of tables in the resource. Must contain only
`Table` objects.
"""
return self._tables
@property
def resources(self):
"""
A list of nested resources inside this resource. Must contain
only `Resource` objects.
"""
return self._resources
def _add_table(self, iterator, tag, data, config, pos):
table = Table(self._votable, config=config, pos=pos, **data)
self.tables.append(table)
table.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self._votable, iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def parse(self, votable, iterator, config):
self._votable = votable
tag_mapping = {
"TABLE": self._add_table,
"INFO": self._add_info,
"PARAM": self._add_param,
"GROUP": self._add_group,
"COOSYS": self._add_coosys,
"TIMESYS": self._add_timesys,
"RESOURCE": self._add_resource,
"LINK": self._add_link,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
elif tag == "RESOURCE":
break
del self._votable
return self
def to_xml(self, w, **kwargs):
attrs = w.object_attrs(self, ("ID", "type", "utype"))
attrs.update(self.extra_attributes)
with w.tag("RESOURCE", attrib=attrs):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (
self.coordinate_systems,
self.time_systems,
self.params,
self.infos,
self.links,
self.tables,
self.resources,
):
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Recursively iterates over all tables in the resource and
nested resources.
"""
yield from self.tables
for resource in self.resources:
yield from resource.iter_tables()
def iter_fields_and_params(self):
"""
Recursively iterates over all FIELD_ and PARAM_ elements in
the resource, its tables and nested resources.
"""
yield from self.params
for table in self.tables:
yield from table.iter_fields_and_params()
for resource in self.resources:
yield from resource.iter_fields_and_params()
def iter_coosys(self):
"""
Recursively iterates over all the COOSYS_ elements in the
resource and nested resources.
"""
yield from self.coordinate_systems
for resource in self.resources:
yield from resource.iter_coosys()
def iter_timesys(self):
"""
Recursively iterates over all the TIMESYS_ elements in the
resource and nested resources.
"""
yield from self.time_systems
for resource in self.resources:
yield from resource.iter_timesys()
def iter_info(self):
"""
Recursively iterates over all the INFO_ elements in the
resource and nested resources.
"""
yield from self.infos
for table in self.tables:
yield from table.iter_info()
for resource in self.resources:
yield from resource.iter_info()
class VOTableFile(Element, _IDProperty, _DescriptionProperty):
"""
VOTABLE_ element: represents an entire file.
The keyword arguments correspond to setting members of the same
name, documented below.
*version* is settable at construction time only, since conformance
tests for building the rest of the structure depend on it.
"""
def __init__(self, ID=None, id=None, config=None, pos=None, version="1.4"):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._resources = HomogeneousList(Resource)
self._groups = HomogeneousList(Group)
version = str(version)
if version == "1.0":
warnings.warn(
"VOTable 1.0 support is deprecated in astropy 4.3 and will be "
"removed in a future release",
AstropyDeprecationWarning,
)
elif (version != "1.0") and (version not in self._version_namespace_map):
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(f"'version' should be in ('1.0', '{allowed_from_map}').")
self._version = version
def __repr__(self):
n_tables = len(list(self.iter_tables()))
return f"<VOTABLE>... {n_tables} tables ...</VOTABLE>"
@property
def version(self):
"""
The version of the VOTable specification that the file uses.
"""
return self._version
@version.setter
def version(self, version):
version = str(version)
if version not in self._version_namespace_map:
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(
"astropy.io.votable only supports VOTable versions"
f" '{allowed_from_map}'"
)
self._version = version
@property
def coordinate_systems(self):
"""
A list of coordinate system descriptions for the file. Must
contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system descriptions for the file. Must
contain only `TimeSys` objects.
"""
return self._time_systems
@property
def params(self):
"""
A list of parameters (constant-valued columns) that apply to
the entire file. Must contain only `Param` objects.
"""
return self._params
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
entire file. Must only contain `Info` objects.
"""
return self._infos
@property
def resources(self):
"""
A list of resources, in the order they appear in the file.
Must only contain `Resource` objects.
"""
return self._resources
@property
def groups(self):
"""
A list of groups, in the order they appear in the file. Only
supported as a child of the VOTABLE element in VOTable 1.2 or
later.
"""
return self._groups
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self, iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("GROUP", "VOTABLE", "1.2"), config, pos)
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _get_version_checks(self):
config = {}
config["version_1_1_or_later"] = util.version_compare(self.version, "1.1") >= 0
config["version_1_2_or_later"] = util.version_compare(self.version, "1.2") >= 0
config["version_1_3_or_later"] = util.version_compare(self.version, "1.3") >= 0
config["version_1_4_or_later"] = util.version_compare(self.version, "1.4") >= 0
return config
# Map VOTable version numbers to namespace URIs and schema information.
_version_namespace_map = {
# Version 1.0 isn't well-supported, but is allowed on parse (with a warning).
# It used DTD rather than schema, so this information would not be useful.
# By omitting 1.0 from this dict we can use the keys as the list of versions
# that are allowed in various other checks.
"1.1": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.1",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.1",
},
"1.2": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.2",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.2",
},
# With 1.3 we'll be more explicit with the schema location.
# - xsi:schemaLocation uses the namespace name along with the URL
# to reference it.
# - For convenience, but somewhat confusingly, the namespace URIs
# are also usable URLs for accessing an applicable schema.
# However to avoid confusion, we'll use the explicit schema URL.
"1.3": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value": (
"http://www.ivoa.net/xml/VOTable/v1.3"
" http://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"
),
},
# With 1.4 namespace URIs stopped incrementing with minor version changes
# so we use the same URI as with 1.3. See this IVOA note for more info:
# http://www.ivoa.net/documents/Notes/XMLVers/20180529/
"1.4": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value": (
"http://www.ivoa.net/xml/VOTable/v1.3"
" http://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"
),
},
}
def parse(self, iterator, config):
config["_current_table_number"] = 0
for start, tag, data, pos in iterator:
if start:
if tag == "xml":
pass
elif tag == "VOTABLE":
if "version" not in data:
warn_or_raise(W20, W20, self.version, config, pos)
config["version"] = self.version
else:
config["version"] = self._version = data["version"]
if config["version"].lower().startswith("v"):
warn_or_raise(W29, W29, config["version"], config, pos)
self._version = config["version"] = config["version"][1:]
if config["version"] not in self._version_namespace_map:
vo_warn(W21, config["version"], config, pos)
if "xmlns" in data:
ns_info = self._version_namespace_map.get(config["version"], {})
correct_ns = ns_info.get("namespace_uri")
if data["xmlns"] != correct_ns:
vo_warn(W41, (correct_ns, data["xmlns"]), config, pos)
else:
vo_warn(W42, (), config, pos)
break
else:
vo_raise(E19, (), config, pos)
config.update(self._get_version_checks())
tag_mapping = {
"PARAM": self._add_param,
"RESOURCE": self._add_resource,
"COOSYS": self._add_coosys,
"TIMESYS": self._add_timesys,
"INFO": self._add_info,
"DEFINITIONS": self._add_definitions,
"DESCRIPTION": self._ignore_add,
"GROUP": self._add_group,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "VOTABLE", config, pos)
self.description = data or None
if not len(self.resources) and config["version_1_2_or_later"]:
warn_or_raise(W53, W53, (), config, pos)
return self
def to_xml(
self,
fd,
compressed=False,
tabledata_format=None,
_debug_python_based_parser=False,
_astropy_version=None,
):
"""
Write to an XML file.
Parameters
----------
fd : str or file-like
Where to write the file. If a file-like object, must be writable.
compressed : bool, optional
When `True`, write to a gzip-compressed file. (Default:
`False`)
tabledata_format : str, optional
Override the format of the table(s) data to write. Must
be one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified
in each `Table` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
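        Examples
        --------
        A minimal sketch (the file names are illustrative only)::
            >>> from astropy.io.votable import parse
            >>> votable = parse("votable.xml")                          # doctest: +SKIP
            >>> votable.to_xml("copy.xml", tabledata_format="binary")   # doctest: +SKIP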
"""
if tabledata_format is not None:
if tabledata_format.lower() not in ("tabledata", "binary", "binary2"):
raise ValueError(f"Unknown format type '{format}'")
kwargs = {
"version": self.version,
"tabledata_format": tabledata_format,
"_debug_python_based_parser": _debug_python_based_parser,
"_group_number": 1,
}
kwargs.update(self._get_version_checks())
with util.convert_to_writable_filelike(fd, compressed=compressed) as fd:
w = XMLWriter(fd)
version = self.version
if _astropy_version is None:
lib_version = astropy_version
else:
lib_version = _astropy_version
xml_header = """
<?xml version="1.0" encoding="utf-8"?>
<!-- Produced with astropy.io.votable version {lib_version}
http://www.astropy.org/ -->\n"""
w.write(xml_header.lstrip().format(**locals()))
# Build the VOTABLE tag attributes.
votable_attr = {
"version": version,
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
ns_info = self._version_namespace_map.get(version, {})
namespace_uri = ns_info.get("namespace_uri")
if namespace_uri:
votable_attr["xmlns"] = namespace_uri
schema_location_attr = ns_info.get("schema_location_attr")
schema_location_value = ns_info.get("schema_location_value")
if schema_location_attr and schema_location_value:
votable_attr[schema_location_attr] = schema_location_value
with w.tag("VOTABLE", votable_attr):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
element_sets = [
self.coordinate_systems,
self.time_systems,
self.params,
self.infos,
self.resources,
]
if kwargs["version_1_2_or_later"]:
element_sets[0] = self.groups
for element_set in element_sets:
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Iterates over all tables in the VOTable file in a "flat" way,
ignoring the nesting of resources etc.
"""
for resource in self.resources:
yield from resource.iter_tables()
def get_first_table(self):
"""
Often, you know there is only one table in the file, and
that's all you need. This method returns that first table.
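        Examples
        --------
        A minimal sketch (the file name ``votable.xml`` is illustrative
        only)::
            >>> from astropy.io.votable import parse
            >>> votable = parse("votable.xml")        # doctest: +SKIP
            >>> table = votable.get_first_table()     # doctest: +SKIP
            >>> data = table.array                    # doctest: +SKIP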
"""
for table in self.iter_tables():
if not table.is_empty():
return table
raise IndexError("No table found in VOTABLE file.")
get_table_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_tables",
"TABLE",
"""
Looks up a TABLE_ element by the given ID. Used by the table
"ref" attribute.
""",
)
get_tables_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_tables",
"TABLE",
"""
Looks up a TABLE_ element by the given utype, and returns an
iterator emitting all matches.
""",
)
def get_table_by_index(self, idx):
"""
Get a table by its ordinal position in the file.
"""
for i, table in enumerate(self.iter_tables()):
if i == idx:
return table
raise IndexError(f"No table at index {idx:d} found in VOTABLE file.")
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD_ and PARAM_ elements in the
VOTABLE_ file.
"""
for resource in self.resources:
yield from resource.iter_fields_and_params()
get_field_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given ID_. Used by the field's
"ref" attribute.
""",
)
get_fields_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given utype and returns an
iterator emitting all matches.
""",
)
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given ID_ or name.
""",
)
def iter_values(self):
"""
Recursively iterate over all VALUES_ elements in the VOTABLE_
file.
"""
for field in self.iter_fields_and_params():
yield field.values
get_values_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_values",
"VALUES",
"""
Looks up a VALUES_ element by the given ID. Used by the values
"ref" attribute.
""",
)
def iter_groups(self):
"""
Recursively iterate over all GROUP_ elements in the VOTABLE_
file.
"""
for table in self.iter_tables():
yield from table.iter_groups()
get_group_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_groups",
"GROUP",
"""
Looks up a GROUP_ element by the given ID. Used by the group's
"ref" attribute
""",
)
get_groups_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_groups",
"GROUP",
"""
Looks up a GROUP_ element by the given utype and returns an
iterator emitting all matches.
""",
)
def iter_coosys(self):
"""
Recursively iterate over all COOSYS_ elements in the VOTABLE_
file.
"""
yield from self.coordinate_systems
for resource in self.resources:
yield from resource.iter_coosys()
get_coosys_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_coosys",
"COOSYS",
"""Looks up a COOSYS_ element by the given ID.""",
)
def iter_timesys(self):
"""
Recursively iterate over all TIMESYS_ elements in the VOTABLE_
file.
"""
yield from self.time_systems
for resource in self.resources:
yield from resource.iter_timesys()
get_timesys_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_timesys",
"TIMESYS",
"""Looks up a TIMESYS_ element by the given ID.""",
)
def iter_info(self):
"""
Recursively iterate over all INFO_ elements in the VOTABLE_
file.
"""
yield from self.infos
for resource in self.resources:
yield from resource.iter_info()
get_info_by_id = _lookup_by_attr_factory(
"ID", True, "iter_info", "INFO", """Looks up a INFO element by the given ID."""
)
def set_all_tables_format(self, format):
"""
Set the output storage format of all tables in the file.
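For example, assuming ``votable_file`` is an existing `VOTableFile`
instance, the following minimal sketch switches every table to BINARY
serialization (other accepted values are ``'tabledata'`` and, for
VOTable 1.3 and later, ``'binary2'``)::
    votable_file.set_all_tables_format("binary")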
"""
for table in self.iter_tables():
table.format = format
@classmethod
def from_table(cls, table, table_id=None):
"""
Create a `VOTableFile` instance from a given
`astropy.table.Table` instance.
Parameters
----------
table : `~astropy.table.Table` instance
    The table to convert.
table_id : str, optional
    Set the given ID attribute on the returned Table instance.
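Examples
--------
A minimal sketch, assuming an in-memory `~astropy.table.Table` with
placeholder column data (the ``table_id`` value is likewise illustrative)::
    from astropy.table import Table
    from astropy.io.votable.tree import VOTableFile
    data = Table({"a": [1, 2, 3]})
    votable_file = VOTableFile.from_table(data, table_id="example_table")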
"""
votable_file = cls()
resource = Resource()
votable = Table.from_table(votable_file, table)
if table_id is not None:
votable.ID = table_id
resource.tables.append(votable)
votable_file.resources.append(resource)
return votable_file
|
ee6dbc01481592eb921156fd0228afd3b70065f3dbdb2d290771f813cc6aa646 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains the high-level functions to read a
VOTable file.
"""
# STDLIB
import io
import os
import sys
import textwrap
import warnings
from astropy.utils import data
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.xml import iterparser
# LOCAL
from . import exceptions, tree
__all__ = [
"parse",
"parse_single_table",
"from_table",
"writeto",
"validate",
"reset_vo_warnings",
]
VERIFY_OPTIONS = ["ignore", "warn", "exception"]
@deprecated_renamed_argument("pedantic", "verify", since="5.0")
def parse(
source,
columns=None,
invalid="exception",
verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE,
table_number=None,
table_id=None,
filename=None,
unit_format=None,
datatype_mapping=None,
_debug_python_based_parser=False,
):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
The number of table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.format.Base` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
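Examples
--------
A minimal sketch of reading a file and walking its tables
(``"votable.xml"`` is a placeholder path)::
    from astropy.io.votable import parse
    votable = parse("votable.xml")
    for table in votable.iter_tables():
        print(table.ID, len(table.array))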
"""
from . import conf
invalid = invalid.lower()
if invalid not in ("exception", "mask"):
raise ValueError(
"accepted values of ``invalid`` are: ``'exception'`` or ``'mask'``."
)
if verify is None:
conf_verify_lowercase = conf.verify.lower()
# We need to allow verify to be booleans as strings since the
# configuration framework doesn't make it easy/possible to have mixed
# types.
if conf_verify_lowercase in ["false", "true"]:
verify = conf_verify_lowercase == "true"
else:
verify = conf_verify_lowercase
if isinstance(verify, bool):
verify = "exception" if verify else "warn"
elif verify not in VERIFY_OPTIONS:
raise ValueError(f"verify should be one of {'/'.join(VERIFY_OPTIONS)}")
if datatype_mapping is None:
datatype_mapping = {}
config = {
"columns": columns,
"invalid": invalid,
"verify": verify,
"chunk_size": chunk_size,
"table_number": table_number,
"filename": filename,
"unit_format": unit_format,
"datatype_mapping": datatype_mapping,
}
if isinstance(source, str):
source = os.path.expanduser(source)
if filename is None and isinstance(source, str):
config["filename"] = source
with iterparser.get_xml_iterator(
source, _debug_python_based_parser=_debug_python_based_parser
) as iterator:
return tree.VOTableFile(config=config, pos=(1, 1)).parse(iterator, config)
def parse_single_table(source, **kwargs):
"""
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.Table`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.Table` object
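Examples
--------
A minimal sketch (``"votable.xml"`` is a placeholder path); the returned
tree table can be converted to an `~astropy.table.Table` for further work::
    from astropy.io.votable import parse_single_table
    table = parse_single_table("votable.xml")
    data = table.to_table()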
"""
if kwargs.get("table_number") is None:
kwargs["table_number"] = 0
votable = parse(source, **kwargs)
return votable.get_first_table()
def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or writable file-like
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
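Examples
--------
A minimal sketch, assuming ``votable`` is an existing
`~astropy.io.votable.tree.VOTableFile` and ``"output.xml"`` is a
placeholder path::
    from astropy.io.votable import writeto
    writeto(votable, "output.xml", tabledata_format="binary")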
"""
from astropy.table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance"
)
table.to_xml(
file, tabledata_format=tabledata_format, _debug_python_based_parser=True
)
def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : path-like or file-like
Path to a VOTABLE_ xml file, given as a string or a
`~pathlib.Path` object.
If a file-like object, it must be readable.
output : file-like, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
Must be writable.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
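Examples
--------
A minimal sketch that captures the report as a string instead of printing
it (``"votable.xml"`` is a placeholder path)::
    from astropy.io.votable import validate
    report = validate("votable.xml", output=None)
    print(report)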
"""
from astropy.utils.console import color_print, print_code_line
return_as_str = False
if output is None:
output = io.StringIO()
return_as_str = True
lines = []
votable = None
reset_vo_warnings()
if isinstance(source, str):
source = os.path.expanduser(source)
with data.get_readable_fileobj(source, encoding="binary") as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, str):
filename = source
elif hasattr(source, "name"):
filename = source.name
elif hasattr(source, "url"):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, verify="warn", filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [
str(x.message)
for x in warning_lines
if issubclass(x.category, exceptions.VOWarning)
] + lines
content_buffer.seek(0)
output.write(f"Validation report for {filename}\n\n")
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w["is_something"]:
output.write(w["message"])
output.write("\n\n")
else:
line = xml_lines[w["nline"] - 1]
warning = w["warning"]
if w["is_warning"]:
color = "yellow"
else:
color = "red"
color_print(
f"{w['nline']:d}: ",
"",
warning or "EXC",
color,
": ",
"",
textwrap.fill(
w["message"],
initial_indent=" ",
subsequent_indent=" ",
).lstrip(),
file=output,
)
print_code_line(line, w["nchar"], file=output)
output.write("\n")
else:
output.write("astropy.io.votable found no violations.\n\n")
success = 0
if xmllint and os.path.exists(filename):
from . import xmlutil
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = xmlutil.validate_schema(filename, version)
if success != 0:
output.write("xmllint schema violations:\n\n")
output.write(stderr.decode("utf-8"))
else:
output.write("xmllint passed\n")
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0
def from_table(table, table_id=None):
"""
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.Table` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance
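Examples
--------
A minimal sketch that converts an in-memory table and writes it back out
(the column data and ``"output.xml"`` path are placeholders)::
    from astropy.table import Table
    from astropy.io.votable import from_table, writeto
    data = Table({"flux": [1.0, 2.0, 3.0]})
    votable = from_table(data, table_id="example_table")
    writeto(votable, "output.xml")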
"""
return tree.VOTableFile.from_table(table, table_id=table_id)
def is_votable(source):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : path-like or file-like
Path or file object containing a VOTABLE_ xml file.
If file, must be readable.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
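Examples
--------
A minimal sketch (``"candidate.xml"`` is a placeholder path; the import
assumes this module is available as ``astropy.io.votable.table``)::
    from astropy.io.votable.table import is_votable
    if is_votable("candidate.xml"):
        print("Looks like a VOTable file")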
"""
if isinstance(source, str):
source = os.path.expanduser(source)
try:
with iterparser.get_xml_iterator(source) as iterator:
for start, tag, d, pos in iterator:
if tag != "xml":
return False
break
for start, tag, d, pos in iterator:
if tag != "VOTABLE":
return False
break
return True
except ValueError:
return False
def reset_vo_warnings():
"""
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called.
"""
from . import converters, xmlutil
# -----------------------------------------------------------#
# This is a special variable used by the Python warnings #
# infrastructure to keep track of warnings that have #
# already been seen. Since we want to get every single #
# warning out of this, we have to delete all of them first. #
# -----------------------------------------------------------#
for module in (converters, exceptions, tree, xmlutil):
try:
del module.__warningregistry__
except AttributeError:
pass
|
2c040bed1758d99eaf3ff285b01257af11106cd1bf5783ccf6ef60016af16c29 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""VOTable exceptions and warnings.
.. _warnings:
Warnings
--------
.. note::
Most of the following warnings indicate violations of the VOTable
specification. They should be reported to the authors of the
tools that produced the VOTable file.
To control the warnings emitted, use the standard Python
:mod:`warnings` module and the ``astropy.io.votable.exceptions.conf.max_warnings``
configuration item. Most of these are of the type `VOTableSpecWarning`.
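For example, a minimal sketch that silences every VOTable specification
warning for the rest of a session::
    import warnings
    from astropy.io.votable.exceptions import VOTableSpecWarning
    warnings.simplefilter("ignore", VOTableSpecWarning)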
{warnings}
.. _exceptions:
Exceptions
----------
.. note::
This is a list of many of the fatal exceptions emitted by ``astropy.io.votable``
when the file does not conform to spec. Other exceptions may be
raised due to unforeseen cases or bugs in ``astropy.io.votable`` itself.
{exceptions}
"""
# STDLIB
import io
import re
from textwrap import dedent
from warnings import warn
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"Conf",
"conf",
"warn_or_raise",
"vo_raise",
"vo_reraise",
"vo_warn",
"warn_unknown_attrs",
"parse_vowarning",
"VOWarning",
"VOTableChangeWarning",
"VOTableSpecWarning",
"UnimplementedWarning",
"IOWarning",
"VOTableSpecError",
]
# NOTE: Cannot put this in __init__.py due to circular import.
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable.exceptions`.
"""
max_warnings = _config.ConfigItem(
10,
"Number of times the same type of warning is displayed before being suppressed",
cfgtype="integer",
)
conf = Conf()
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ("?", "?")
filename = config.get("filename", "?")
return f"{filename}:{pos[0]}:{pos[1]}: {name}: {message}"
def _suppressed_warning(warning, config, stacklevel=2):
warning_class = type(warning)
config.setdefault("_warning_counts", dict()).setdefault(warning_class, 0)
config["_warning_counts"][warning_class] += 1
message_count = config["_warning_counts"][warning_class]
if message_count <= conf.max_warnings:
if message_count == conf.max_warnings:
warning.formatted_message += (
" (suppressing further warnings of this type...)"
)
warn(warning, stacklevel=stacklevel + 1)
def warn_or_raise(
warning_class, exception_class=None, args=(), config=None, pos=None, stacklevel=1
):
"""
Warn or raise an exception, depending on the verify setting.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get("verify", "warn")
if config_value == "exception":
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == "warn":
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel + 1)
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=""):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += " " + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
if config.get("verify", "warn") != "ignore":
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel + 1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel + 1)
_warning_pat = re.compile(
r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): "
+ r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$"
)
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
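Examples
--------
A minimal sketch using an illustrative warning string in the
``filename:line:column: code: message`` form produced by this module::
    from astropy.io.votable.exceptions import parse_vowarning
    parts = parse_vowarning("example.xml:1:0: W42: No XML namespace specified")
    print(parts["warning"], parts["nline"], parts["message"])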
"""
result = {}
match = _warning_pat.search(line)
if match:
result["warning"] = warning = match.group("warning")
if warning is not None:
result["is_warning"] = warning[0].upper() == "W"
result["is_exception"] = not result["is_warning"]
result["number"] = int(match.group("warning")[1:])
result["doc_url"] = f"io/votable/api_exceptions.html#{warning.lower()}"
else:
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = True
result["number"] = None
result["doc_url"] = None
try:
result["nline"] = int(match.group("nline"))
except ValueError:
result["nline"] = 0
try:
result["nchar"] = int(match.group("nchar"))
except ValueError:
result["nchar"] = 0
result["message"] = match.group("rest")
result["is_something"] = True
else:
result["warning"] = None
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = False
result["is_something"] = False
if not isinstance(line, str):
line = line.decode("utf-8")
result["message"] = line
return result
class VOWarning(AstropyWarning):
"""
The base class of all VO warnings and exceptions.
Handles the formatting of the message with a warning or exception
code, filename, line and column number.
"""
default_args = ()
message_template = ""
def __init__(self, args, config=None, pos=None):
if config is None:
config = {}
if not isinstance(args, tuple):
args = (args,)
msg = self.message_template.format(*args)
self.formatted_message = _format_message(
msg, self.__class__.__name__, config, pos
)
Warning.__init__(self, self.formatted_message)
def __str__(self):
return self.formatted_message
@classmethod
def get_short_name(cls):
if len(cls.default_args):
return cls.message_template.format(*cls.default_args)
return cls.message_template
class VOTableChangeWarning(VOWarning, SyntaxWarning):
"""
A change has been made to the input XML file.
"""
class VOTableSpecWarning(VOWarning, SyntaxWarning):
"""
The input XML file violates the spec, but there is an obvious workaround.
"""
class UnimplementedWarning(VOWarning, SyntaxWarning):
"""
A feature of the VOTABLE_ spec is not implemented.
"""
class IOWarning(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
class VOTableSpecError(VOWarning, ValueError):
"""
The input XML file violates the spec and there is no good workaround.
"""
class W01(VOTableSpecWarning):
"""Array uses commas rather than whitespace.
The VOTable spec states:
If a cell contains an array or complex number, it should be
encoded as multiple numbers separated by whitespace.
Many VOTable files in the wild use commas as a separator instead,
and ``astropy.io.votable`` can support this convention depending on the
:ref:`astropy:verifying-votables` setting.
``astropy.io.votable`` always outputs files using only spaces, regardless of
how they were input.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
"""
message_template = "Array uses commas rather than whitespace"
class W02(VOTableSpecWarning):
r"""Nonstandard XML id.
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
The VOTable 1.1 says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``astropy.io.votable`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <https://www.w3.org/TR/xml-names/>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ("x", "y")
class W03(VOTableChangeWarning):
"""Implicitly generating an ID from a name.
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``astropy.io.votable`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
However, when ``ID`` is not present, (since it is not required by
the specification) ``name`` is used instead. However, ``name``
must be cleansed by replacing invalid characters (such as
whitespace) with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ("x", "y")
class W04(VOTableSpecWarning):
"""
The ``content-type`` attribute must use MIME content-type syntax as
defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__.
The current check for validity is somewhat over-permissive.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "content-type '{}' must be a valid MIME content type"
default_args = ("x",)
class W05(VOTableSpecWarning):
"""
The attribute must be a valid URI as defined in `RFC 2396
<https://www.ietf.org/rfc/rfc2396.txt>`_.
"""
message_template = "'{}' is not a valid URI"
default_args = ("x",)
class W06(VOTableSpecWarning):
"""
This warning is emitted when a ``ucd`` attribute does not match
the syntax of a `unified content descriptor
<http://vizier.u-strasbg.fr/doc/UCD.htx>`__.
If the VOTable version is 1.2 or later, the UCD will also be
checked to ensure it conforms to the controlled vocabulary defined
by UCD1+.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__
"""
message_template = "Invalid UCD '{}': {}"
default_args = ("x", "explanation")
class W07(VOTableSpecWarning):
"""Invalid astroYear.
An astroYear field is a Besselian or Julian year matching the
regular expression::
^[JB]?[0-9]+([.][0-9]*)?$
Defined in this XML Schema snippet::
<xs:simpleType name="astroYear">
<xs:restriction base="xs:token">
<xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/>
</xs:restriction>
</xs:simpleType>
"""
message_template = "Invalid astroYear in {}: '{}'"
default_args = ("x", "y")
class W08(VOTableSpecWarning):
"""
To avoid locale-dependent number parsing differences, ``astropy.io.votable``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ("x",)
class W09(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``astropy.io.votable`` accepts ``id`` and emits this warning if
``verify`` is ``'warn'``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
class W10(VOTableSpecWarning):
"""
The parser has encountered an element that does not exist in the
specification, or appears in an invalid context. Check the file
against the VOTable schema (with a tool such as `xmllint
<http://xmlsoft.org/xmllint.html>`__. If the file validates
against the schema, and you still receive this warning, this may
indicate a bug in ``astropy.io.votable``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Unknown tag '{}'. Ignoring"
default_args = ("x",)
class W11(VOTableSpecWarning):
"""
Earlier versions of the VOTable specification used a ``gref``
attribute on the ``LINK`` element to specify a `GLU reference
<http://aladin.u-strasbg.fr/glu/>`__. New files should
specify a ``glu:`` protocol using the ``href`` attribute.
Since ``astropy.io.votable`` does not currently support GLU references, it
likewise does not automatically convert the ``gref`` attribute to
the new form.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "The gref attribute on LINK is deprecated in VOTable 1.1"
class W12(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
``name`` is not present but ``ID`` is, and ``verify`` is not ``'exception'``,
``astropy.io.votable`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes"
)
default_args = ("x",)
class W13(VOTableSpecWarning):
"""Invalid VOTable datatype.
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ("x", "y")
# W14: Deprecated
class W15(VOTableSpecWarning):
"""
The ``name`` attribute is required on every ``FIELD`` element.
However, many VOTable files in the wild omit it and provide only
an ``ID`` instead. In this case, when ``verify`` is not ``'exception'``
``astropy.io.votable`` will copy the ``ID`` attribute to a new ``name``
attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "{} element missing required 'name' attribute"
default_args = ("x",)
# W16: Deprecated
class W17(VOTableSpecWarning):
"""
A ``DESCRIPTION`` element can only appear once within its parent
element.
According to the schema, it may only occur once (`1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__)
However, it is a `proposed extension
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__
to VOTable 1.2.
"""
message_template = "{} element contains more than one DESCRIPTION element"
default_args = ("x",)
class W18(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If ``verify`` is not
``'exception'``, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = "TABLE specified nrows={}, but table contains {} rows"
default_args = ("x", "y")
class W19(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded FITS file. If ``verify`` is not
``'exception'``, the embedded FITS file will take precedence.
"""
message_template = (
"The fields defined in the VOTable do not match those in the "
+ "embedded FITS file"
)
class W20(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = "No version number specified in file. Assuming {}"
default_args = ("1.1",)
class W21(UnimplementedWarning):
"""
Unknown issues may arise using ``astropy.io.votable`` with VOTable files
from a version other than 1.1, 1.2, 1.3, or 1.4.
"""
message_template = (
"astropy.io.votable is designed for VOTable version 1.1, 1.2, 1.3,"
" and 1.4, but this file is {}"
)
default_args = ("x",)
class W22(VOTableSpecWarning):
"""
Version 1.0 of the VOTable specification used the ``DEFINITIONS``
element to define coordinate systems. Version 1.1 now uses
``COOSYS`` elements throughout the document.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__
"""
message_template = "The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring"
class W23(IOWarning):
"""
Raised when the VO service database can not be updated (possibly
due to a network outage). This is only a warning, since an older
and possibly out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ("x",)
class W24(VOWarning, FutureWarning):
"""
The VO catalog database retrieved from the www is designed for a
newer version of ``astropy.io.votable``. This may cause problems or limited
functionality when performing service queries. Consider upgrading ``astropy.io.votable``
to the latest version.
"""
message_template = (
"The VO catalog database is for a later version of astropy.io.votable"
)
class W25(IOWarning):
"""
A VO service query failed due to a network error or malformed
arguments. Another alternative service may be attempted. If all
services fail, an exception will be raised.
"""
message_template = "'{}' failed with: {}"
default_args = ("service", "...")
class W26(VOTableSpecWarning):
"""
The given element was not supported inside of the given element
until the specified VOTable version, however the version declared
in the file is for an earlier version. The element may not
be written out to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ("child", "parent", "X.X")
class W27(VOTableSpecWarning):
"""
The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in
favor of a reference to the Space-Time Coordinate (STC) data
model (see `utype
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
and the IVOA note `referencing STC in VOTable
<http://ivoa.net/Documents/latest/VOTableSTC.html>`__).
"""
message_template = "COOSYS deprecated in VOTable 1.2"
class W28(VOTableSpecWarning):
"""
The given attribute was not supported on the given element until the
specified VOTable version, however the version declared in the file is
for an earlier version. These attributes may not be written out to
the file.
"""
message_template = "'{}' on '{}' added in VOTable {}"
default_args = ("attribute", "element", "X.X")
class W29(VOTableSpecWarning):
"""
Some VOTable files specify their version number in the form "v1.0",
when the only supported form in the spec is "1.0".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ("v1.0",)
class W30(VOTableSpecWarning):
"""
Some VOTable files write missing floating-point values in non-standard ways,
such as "null" and "-". If ``verify`` is not ``'exception'``, any
non-standard floating-point literals are treated as missing values.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid literal for float '{}'. Treating as empty."
default_args = ("x",)
class W31(VOTableSpecWarning):
"""
Since NaN's can not be represented in integer fields directly, a null
value must be specified in the FIELD descriptor to support reading
NaN's from the tabledata.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "NaN given in an integral field without a specified null value"
class W32(VOTableSpecWarning):
"""
Each field in a table must have a unique ID. If two or more fields
have the same ID, some will be renamed to ensure that all IDs are
unique.
From the VOTable 1.2 spec:
The ``ID`` and ``ref`` attributes are defined as XML types
``ID`` and ``IDREF`` respectively. This means that the
contents of ``ID`` is an identifier which must be unique
throughout a VOTable document, and that the contents of the
``ref`` attribute represents a reference to an identifier
which must exist in the VOTable document.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness"
default_args = ("x", "x_2")
class W33(VOTableChangeWarning):
"""
Each field in a table must have a unique name. If two or more
fields have the same name, some will be renamed to ensure that all
names are unique.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Column name '{}' renamed to '{}' to ensure uniqueness"
default_args = ("x", "x_2")
class W34(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ("x", "y")
class W35(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ("x",)
class W36(VOTableSpecWarning):
"""
If the field specifies a ``null`` value, that value must conform
to the given ``datatype``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "null value '{}' does not match field datatype, setting to 0"
default_args = ("x",)
class W37(UnimplementedWarning):
"""
The 3 datatypes defined in the VOTable specification and supported by
``astropy.io.votable`` are ``TABLEDATA``, ``BINARY`` and ``FITS``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ("x",)
class W38(VOTableSpecWarning):
"""
The only encoding for local binary data supported by the VOTable
specification is base64.
"""
message_template = "Inline binary data must be base64 encoded, got '{}'"
default_args = ("x",)
class W39(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
class W40(VOTableSpecWarning):
"""
This is a terrible hack to support Simple Image Access Protocol
results from `NOIRLab Astro Data Archive <https://astroarchive.noirlab.edu/>`__. It
creates a field for the coordinate projection type of type "double",
which actually contains character data. We have to hack the field
to store character data, or we can't read it in. A warning will be
raised when this happens.
"""
message_template = "'cprojection' datatype repaired"
class W41(VOTableSpecWarning):
"""
An XML namespace was specified on the ``VOTABLE`` element, but the
namespace does not match what is expected for a ``VOTABLE`` file.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
Some files in the wild set the namespace to the location of the
VOTable schema, which is not correct and will not pass some
validating parsers.
"""
message_template = (
"An XML namespace is specified, but is incorrect. Expected '{}', got '{}'"
)
default_args = ("x", "y")
class W42(VOTableSpecWarning):
"""The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
class W43(VOTableSpecWarning):
"""Referenced elements should be defined before referees.
From the VOTable 1.2 spec:
In VOTable1.2, it is further recommended to place the ID
attribute prior to referencing it whenever possible.
"""
message_template = "{} ref='{}' which has not already been defined"
default_args = ("element", "x")
class W44(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ("element",)
class W45(VOWarning, ValueError):
"""Invalid content-role attribute.
The ``content-role`` attribute on the ``LINK`` element must be one of
the following::
query, hints, doc, location
And in VOTable 1.3, additionally::
type
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
`1.3
<http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__
"""
message_template = "content-role attribute '{}' invalid"
default_args = ("x",)
class W46(VOTableSpecWarning):
"""
The given char or unicode string is too long for the specified
field length.
"""
message_template = "{} value is too long for specified length of {}"
default_args = ("char or unicode", "x")
class W47(VOTableSpecWarning):
"""
If no arraysize is specified on a char field, the default of '1'
is implied, but this is rarely what is intended.
"""
message_template = "Missing arraysize indicates length 1"
class W48(VOTableSpecWarning):
"""
The attribute is not defined in the specification.
"""
message_template = "Unknown attribute '{}' on {}"
default_args = ("attribute", "element")
class W49(VOTableSpecWarning):
"""
Prior to VOTable 1.3, the empty cell was illegal for integer
fields.
If a \"null\" value was specified for the cell, it will be used
for the value, otherwise, 0 will be used.
"""
message_template = "Empty cell illegal for integer fields."
class W50(VOTableSpecWarning):
"""
Invalid unit string as defined in the `Units in the VO, Version 1.0
<https://www.ivoa.net/documents/VOUnits>`_ (VOTable version >= 1.4)
or `Standards for Astronomical Catalogues, Version 2.0
<http://cdsarc.u-strasbg.fr/doc/catstd-3.2.htx>`_ (version < 1.4).
Consider passing an explicit ``unit_format`` parameter if the units
in this file conform to another specification.
"""
message_template = "Invalid unit string '{}'"
default_args = ("x",)
class W51(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ("x", "n-bit")
class W52(VOTableSpecWarning):
"""
The BINARY2 format was introduced in VOTable 1.3. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The BINARY2 format was introduced in VOTable 1.3, but "
"this file is declared as version '{}'"
)
default_args = ("1.2",)
class W53(VOTableSpecWarning):
"""
The VOTABLE element must contain at least one RESOURCE element.
"""
message_template = "VOTABLE element must contain at least one RESOURCE element."
default_args = ()
class W54(VOTableSpecWarning):
"""
The TIMESYS element was introduced in VOTable 1.4. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The TIMESYS element was introduced in VOTable 1.4, but "
"this file is declared as version '{}'"
)
default_args = ("1.3",)
class W55(VOTableSpecWarning):
"""
When non-ASCII characters are detected when reading
a TABLEDATA value for a FIELD with ``datatype="char"``, we
can issue this warning.
"""
message_template = (
'FIELD ({}) has datatype="char" but contains non-ASCII value ({})'
)
default_args = ("", "")
class E01(VOWarning, ValueError):
"""Invalid size specifier for a field.
The size specifier for a ``char`` or ``unicode`` field must be
only a number followed, optionally, by an asterisk.
Multi-dimensional size specifiers are not supported for these
datatypes.
Strings, which are defined as a set of characters, can be
represented in VOTable as a fixed- or variable-length array of
characters::
<FIELD name="unboundedString" datatype="char" arraysize="*"/>
A 1D array of strings can be represented as a 2D array of
characters, but given the logic above, it is possible to define a
variable-length array of fixed-length strings, but not a
fixed-length array of variable-length strings.
"""
message_template = "Invalid size specifier '{}' for a {} field (in field '{}')"
default_args = ("x", "char/unicode", "y")
class E02(VOWarning, ValueError):
"""Incorrect number of elements in array.
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. Expected multiple of {}, got {}"
)
default_args = ("x", "y")
class E03(VOWarning, ValueError):
"""Complex numbers should be two values separated by whitespace.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' does not parse as a complex number"
default_args = ("x",)
class E04(VOWarning, ValueError):
"""A ``bit`` array should be a string of '0's and '1's.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid bit value '{}'"
default_args = ("x",)
class E05(VOWarning, ValueError):
r"""Invalid boolean value.
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ("x",)
class E06(VOWarning, ValueError):
"""Unknown datatype on a field.
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
these cases :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ("x", "y")
# E07: Deprecated
class E08(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ("x",)
class E09(VOWarning, ValueError):
"""
The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a
``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "'{}' must have a value attribute"
default_args = ("x",)
class E10(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ("FIELD",)
class E11(VOWarning, ValueError):
"""
The precision attribute is meant to express the number of significant
digits, either as a number of decimal places (e.g. ``precision="F2"`` or
equivalently ``precision="2"`` to express 2 significant figures
after the decimal point), or as a number of significant figures
(e.g. ``precision="E5"`` indicates a relative precision of 10⁻⁵).
It is validated using the following regular expression::
[EF]?[1-9][0-9]*
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "precision '{}' is invalid"
default_args = ("x",)
class E12(VOWarning, ValueError):
"""
The width attribute is meant to indicate to the application the
number of characters to be used for input or output of the
quantity.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "width must be a positive integer, got '{}'"
default_args = ("x",)
class E13(VOWarning, ValueError):
r"""Invalid arraysize attribute.
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
≈2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64×64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/>
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ("x",)
class E14(VOWarning, ValueError):
"""
All ``PARAM`` elements must have a ``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "value attribute is required for all PARAM elements"
class E15(VOWarning, ValueError):
"""All ``COOSYS`` elements must have an ``ID`` attribute.
Note that the VOTable 1.1 specification says this attribute is
optional, but its corresponding schema indicates it is required.
In VOTable 1.2, the ``COOSYS`` element is deprecated.
"""
message_template = "ID attribute is required for all COOSYS elements"
class E16(VOTableSpecWarning):
"""Incorrect ``system`` attribute on COOSYS element.
The ``system`` attribute must be one of the following::
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', 'geo_app'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__
"""
message_template = "Invalid system attribute '{}'"
default_args = ("x",)
class E17(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
class E18(VOWarning, ValueError):
"""
The ``type`` attribute of the ``RESOURCE`` element must be one of
"results" or "meta".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "type must be 'results' or 'meta', not '{}'"
default_args = ("x",)
class E19(VOWarning, ValueError):
"""
Raised either when the file doesn't appear to be XML, or the root
element is not VOTABLE.
"""
message_template = "File does not appear to be a VOTABLE"
class E20(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ("x",)
class E21(VOWarning, ValueError):
"""
The table had *x* fields defined, but the data itself has only *y*
columns.
"""
message_template = "Data has fewer columns ({}) than are defined in the header ({})"
default_args = ("x", "y")
class E22(VOWarning, ValueError):
"""
All ``TIMESYS`` elements must have an ``ID`` attribute.
"""
message_template = "ID attribute is required for all TIMESYS elements"
class E23(VOTableSpecWarning):
"""
The ``timeorigin`` attribute on the ``TIMESYS`` element must be
either a floating point literal specifying a valid Julian Date,
or, for convenience, the string "MJD-origin" (standing for 2400000.5)
or the string "JD-origin" (standing for 0).
**References**: `1.4
<http://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC21>`__
"""
message_template = "Invalid timeorigin attribute '{}'"
default_args = ("x",)
class E24(VOWarning, ValueError):
"""
Non-ASCII unicode values should not be written when the FIELD ``datatype="char"``,
and cannot be written in BINARY or BINARY2 serialization.
"""
message_template = (
'Attempt to write non-ASCII value ({}) to FIELD ({}) which has datatype="char"'
)
default_args = ("", "")
class E25(VOTableSpecWarning):
"""
A VOTable cannot have a DATA section without any defined FIELD; DATA will be ignored.
"""
message_template = "No FIELDs are defined; DATA section will be ignored."
def _get_warning_and_exception_classes(prefix):
classes = []
for key, val in globals().items():
if re.match(prefix + "[0-9]{2}", key):
classes.append((key, val))
classes.sort()
return classes
def _build_doc_string():
def generate_set(prefix):
classes = _get_warning_and_exception_classes(prefix)
out = io.StringIO()
for name, cls in classes:
out.write(f".. _{name}:\n\n")
msg = f"{cls.__name__}: {cls.get_short_name()}"
if not isinstance(msg, str):
msg = msg.decode("utf-8")
out.write(msg)
out.write("\n")
out.write("~" * len(msg))
out.write("\n\n")
doc = cls.__doc__
if not isinstance(doc, str):
doc = doc.decode("utf-8")
out.write(dedent(doc))
out.write("\n\n")
return out.getvalue()
warnings = generate_set("W")
exceptions = generate_set("E")
return {"warnings": warnings, "exceptions": exceptions}
if __doc__ is not None:
__doc__ = __doc__.format(**_build_doc_string())
__all__.extend([x[0] for x in _get_warning_and_exception_classes("W")])
__all__.extend([x[0] for x in _get_warning_and_exception_classes("E")])
|
9e2345a86e6f36939ffa4a2503ced1e214103debbbe8df2d5edc7efca5d20359 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Various XML-related utilities.
"""
# ASTROPY
from astropy.logger import log
from astropy.utils import data
from astropy.utils.xml import check as xml_check
from astropy.utils.xml import validate
# LOCAL
from .exceptions import W02, W03, W04, W05, vo_warn, warn_or_raise
__all__ = [
"check_id",
"fix_id",
"check_token",
"check_mime_content_type",
"check_anyuri",
"validate_schema",
]
def check_id(ID, name="ID", config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *ID*
is not a valid XML ID_.
*name* is the name of the attribute being checked (used only for
error messages).
"""
if ID is not None and not xml_check.check_id(ID):
warn_or_raise(W02, W02, (name, ID), config, pos)
return False
return True
def fix_id(ID, config=None, pos=None):
"""
Given an arbitrary string, create one that can be used as an xml id.
This is rather simplistic at the moment, since it just replaces
non-valid characters with underscores.
"""
if ID is None:
return None
corrected = xml_check.fix_id(ID)
if corrected != ID:
vo_warn(W03, (ID, corrected), config, pos)
return corrected
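# Minimal usage sketch for the two helpers above. The input string is made up
# purely for illustration; fix_id's exact output depends on xml_check.fix_id,
# so only the calling pattern is shown and no particular result is assumed.
def _example_check_and_fix_id():
    candidate = "1 invalid id"
    if not check_id(candidate, name="ID"):
        # check_id has already emitted W02; fall back to a sanitized ID.
        candidate = fix_id(candidate)
    return candidate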
_token_regex = r"(?![\r\l\t ])[^\r\l\t]*(?![\r\l\t ])"
def check_token(token, attr_name, config=None, pos=None):
"""
    Returns `False` if *token* is not a valid XML token, as defined by
    XML Schema Part 2; otherwise returns `True`.
"""
if token is not None and not xml_check.check_token(token):
return False
return True
def check_mime_content_type(content_type, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*content_type* is not a valid MIME content type.
As defined by RFC 2045 (syntactically, at least).
"""
if content_type is not None and not xml_check.check_mime_content_type(content_type):
warn_or_raise(W04, W04, content_type, config, pos)
return False
return True
def check_anyuri(uri, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*uri* is not a valid URI.
As defined in RFC 2396.
"""
if uri is not None and not xml_check.check_anyuri(uri):
warn_or_raise(W05, W05, uri, config, pos)
return False
return True
def validate_schema(filename, version="1.1"):
"""
Validates the given file against the appropriate VOTable schema.
Parameters
----------
filename : str
The path to the XML file to validate
version : str, optional
The VOTABLE version to check, which must be a string \"1.0\",
\"1.1\", \"1.2\" or \"1.3\". If it is not one of these,
version \"1.1\" is assumed.
For version \"1.0\", it is checked against a DTD, since that
version did not have an XML Schema.
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings
"""
if version not in ("1.0", "1.1", "1.2", "1.3"):
log.info(f"{filename} has version {version}, using schema 1.1")
version = "1.1"
if version in ("1.1", "1.2", "1.3"):
schema_path = data.get_pkg_data_filename(f"data/VOTable.v{version}.xsd")
else:
schema_path = data.get_pkg_data_filename("data/VOTable.dtd")
return validate.validate_schema(filename, schema_path)
|
487d1db23388913415045b30ec17e991ddc6b4e4ba07d60095d419e4cca8accc | from .tiled_compression import *
|
47a15e081acb2f1a9674b2c30e8f5879d0aa5c07cbb26408552e354b4a5ada61 | """
This module contains the FITS compression algorithms in numcodecs style Codecs.
"""
from gzip import compress as gzip_compress
from gzip import decompress as gzip_decompress
import numpy as np
from astropy.io.fits._tiled_compression._compression import (
compress_hcompress_1_c,
compress_plio_1_c,
compress_rice_1_c,
decompress_hcompress_1_c,
decompress_plio_1_c,
decompress_rice_1_c,
)
# If numcodecs is installed, we use Codec as a base class for the codecs below
# so that they can optionally be used as codecs in any package relying on
# numcodecs - however this is optional and if numcodecs is not installed we use
# our own base class. This does not affect any compressed data functionality
# in astropy.io.fits.
try:
from numcodecs.abc import Codec
except ImportError:
class Codec:
codec_id = None
__all__ = [
"Gzip1",
"Gzip2",
"Rice1",
"PLIO1",
"HCompress1",
]
class Gzip1(Codec):
"""
The FITS GZIP 1 compression and decompression algorithm.
The Gzip algorithm is used in the free GNU software compression utility of
the same name. It was created by J. L. Gailly and M. Adler, based on the
DEFLATE algorithm (Deutsch 1996), which is a combination of LZ77 (Ziv &
Lempel 1977) and Huffman coding.
"""
codec_id = "FITS_GZIP1"
def decode(self, buf):
"""
Decompress buffer using the GZIP_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
# In principle we should be able to not have .tobytes() here and avoid
# the copy but this does not work correctly in Python 3.11.
cbytes = np.frombuffer(buf, dtype=np.uint8).tobytes()
dbytes = gzip_decompress(cbytes)
return np.frombuffer(dbytes, dtype=np.uint8)
def encode(self, buf):
"""
Compress the data in the buffer using the GZIP_1 algorithm.
Parameters
----------
        buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# In principle we should be able to not have .tobytes() here and avoid
# the copy but this does not work correctly in Python 3.11.
dbytes = np.asarray(buf).tobytes()
return gzip_compress(dbytes)
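# Minimal round-trip sketch for Gzip1. The codec works on raw bytes, so decode
# always returns a flat uint8 view of the decompressed buffer; callers are
# expected to reinterpret dtype and shape themselves (see _finalize_array in
# tiled_compression.py).
def _example_gzip1_roundtrip():
    codec = Gzip1()
    original = np.arange(16, dtype=np.uint8)
    compressed = codec.encode(original)
    restored = codec.decode(compressed)
    assert np.array_equal(restored, original)
    return restored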
class Gzip2(Codec):
"""
The FITS GZIP2 compression and decompression algorithm.
    The gzip2 algorithm is a variation on 'GZIP 1'. In this case the bytes in
the array of data values are shuffled so that they are arranged in order of
decreasing significance before being compressed.
For example, a five-element contiguous array of two-byte (16-bit) integer
values, with an original big-endian byte order of:
.. math::
A1 A2 B1 B2 C1 C2 D1 D2 E1 E2
will have the following byte order after shuffling:
.. math::
A1 B1 C1 D1 E1 A2 B2 C2 D2 E2,
    where A1, B1, C1, D1, and E1 are the most-significant bytes from
each of the integer values.
Byte shuffling shall only be performed for integer or floating-point
numeric data types; logical, bit, and character types must not be shuffled.
Parameters
----------
itemsize
        The number of bytes per value (e.g. 2 for a 16-bit integer)
"""
codec_id = "FITS_GZIP2"
def __init__(self, *, itemsize: int):
super().__init__()
self.itemsize = itemsize
def decode(self, buf):
"""
Decompress buffer using the GZIP_2 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(buf, dtype=np.uint8).tobytes()
# Start off by unshuffling buffer
unshuffled_buffer = gzip_decompress(cbytes)
array = np.frombuffer(unshuffled_buffer, dtype=np.uint8)
return array.reshape((self.itemsize, -1)).T.ravel()
def encode(self, buf):
"""
Compress the data in the buffer using the GZIP_2 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# Start off by shuffling buffer
array = np.asarray(buf).ravel()
itemsize = array.dtype.itemsize
array = array.view(np.uint8)
shuffled_buffer = array.reshape((-1, itemsize)).T.ravel().tobytes()
return gzip_compress(shuffled_buffer)
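# Minimal round-trip sketch for Gzip2 with 16-bit data. decode returns the
# unshuffled bytes as a flat uint8 array, so viewing it with the source dtype
# recovers the original values; a contiguous native-endian input is assumed.
def _example_gzip2_roundtrip():
    original = np.array([1, 2, 3, 4, 5], dtype=np.int16)
    codec = Gzip2(itemsize=original.dtype.itemsize)
    compressed = codec.encode(original)
    restored = codec.decode(compressed).view(original.dtype)
    assert np.array_equal(restored, original)
    return restored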
class Rice1(Codec):
"""
The FITS RICE1 compression and decompression algorithm.
    The Rice algorithm [1]_ is simple and very fast. It requires only enough
memory to hold a single block of 16 or 32 pixels at a time. It codes the
pixels in small blocks and so is able to adapt very quickly to changes in
the input image statistics (e.g., Rice has no problem handling cosmic rays,
bright stars, saturated pixels, etc.).
Parameters
----------
blocksize
        The blocksize to use; each tile is coded in blocks of this many pixels.
        The default value in FITS headers is 32 pixels per block.
bytepix
        The number of 8-bit bytes in each original integer pixel value.
References
----------
.. [1] Rice, R. F., Yeh, P.-S., and Miller, W. H. 1993, in Proc. of the 9th
AIAA Computing in Aerospace Conf., AIAA-93-4541-CP, American Institute of
Aeronautics and Astronautics [https://doi.org/10.2514/6.1993-4541]
"""
codec_id = "FITS_RICE1"
def __init__(self, *, blocksize: int, bytepix: int, tilesize: int):
self.blocksize = blocksize
self.bytepix = bytepix
self.tilesize = tilesize
def decode(self, buf):
"""
Decompress buffer using the RICE_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(buf, dtype=np.uint8).tobytes()
dbytes = decompress_rice_1_c(
cbytes, self.blocksize, self.bytepix, self.tilesize
)
return np.frombuffer(dbytes, dtype=f"i{self.bytepix}")
def encode(self, buf):
"""
Compress the data in the buffer using the RICE_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
dbytes = np.asarray(buf).astype(f"i{self.bytepix}").tobytes()
return compress_rice_1_c(dbytes, self.blocksize, self.bytepix)
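# Minimal round-trip sketch for Rice1, assuming the compiled _compression
# extension is available. tilesize must equal the number of elements being
# decoded and bytepix the integer width in bytes; Rice coding is lossless for
# integer data, so the values survive the round trip exactly.
def _example_rice1_roundtrip():
    original = np.arange(64, dtype=np.int32)
    codec = Rice1(blocksize=32, bytepix=4, tilesize=original.size)
    compressed = codec.encode(original)
    restored = codec.decode(compressed)
    assert np.array_equal(restored, original)
    return restored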
class PLIO1(Codec):
"""
The FITS PLIO1 compression and decompression algorithm.
The IRAF PLIO (pixel list) algorithm was developed to store integer-valued
image masks in a compressed form. Such masks often have large regions of
constant value hence are highly compressible. The compression algorithm
used is based on run-length encoding, with the ability to dynamically
follow level changes in the image, allowing a 16-bit encoding to be used
regardless of the image depth.
"""
codec_id = "FITS_PLIO1"
def __init__(self, *, tilesize: int):
self.tilesize = tilesize
def decode(self, buf):
"""
Decompress buffer using the PLIO_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(buf, dtype=np.uint8).tobytes()
dbytes = decompress_plio_1_c(cbytes, self.tilesize)
return np.frombuffer(dbytes, dtype="i4")
def encode(self, buf):
"""
Compress the data in the buffer using the PLIO_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
dbytes = np.asarray(buf).astype("i4").tobytes()
return compress_plio_1_c(dbytes, self.tilesize)
class HCompress1(Codec):
"""
The FITS HCompress compression and decompression algorithm.
    Hcompress is an image compression package written by Richard L. White
for use at the Space Telescope Science Institute. Hcompress was used to
compress the STScI Digitized Sky Survey and has also been used to compress
the preview images in the Hubble Data Archive.
The technique gives very good compression for astronomical images and is
relatively fast. The calculations are carried out using integer arithmetic
and are entirely reversible. Consequently, the program can be used for
either lossy or lossless compression, with no special approach needed for
the lossless case.
Parameters
----------
scale
The integer scale parameter determines the amount of compression. Scale
= 0 or 1 leads to lossless compression, i.e. the decompressed image has
exactly the same pixel values as the original image. If the scale
factor is greater than 1 then the compression is lossy: the
        decompressed image will not be exactly the same as the original.
smooth
        At high compression factors the decompressed image begins to appear
        blocky because of the way information is discarded. This blockiness
        is greatly reduced, producing more pleasing images, if the image
is smoothed slightly during decompression.
References
----------
.. [1] White, R. L. 1992, in Proceedings of the NASA Space and Earth Science
Data Compression Workshop, ed. J. C. Tilton, Snowbird, UT;
https://archive.org/details/nasa_techdoc_19930016742
"""
codec_id = "FITS_HCOMPRESS1"
def __init__(self, *, scale: int, smooth: bool, bytepix: int, nx: int, ny: int):
self.scale = scale
self.smooth = smooth
self.bytepix = bytepix
# NOTE: we should probably make this less confusing, but nx is shape[0] and ny is shape[1]
self.nx = nx
self.ny = ny
def decode(self, buf):
"""
Decompress buffer using the HCOMPRESS_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(buf, dtype=np.uint8).tobytes()
dbytes = decompress_hcompress_1_c(
cbytes, self.nx, self.ny, self.scale, self.smooth, self.bytepix
)
# fits_hdecompress* always returns 4 byte integers irrespective of bytepix
return np.frombuffer(dbytes, dtype="i4")
def encode(self, buf):
"""
Compress the data in the buffer using the HCOMPRESS_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
dbytes = np.asarray(buf).astype(f"i{self.bytepix}").tobytes()
return compress_hcompress_1_c(
dbytes, self.nx, self.ny, self.scale, self.bytepix
)
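# Minimal round-trip sketch for HCompress1 on a small 2D tile, assuming the
# compiled _compression extension is available. scale=0 makes the compression
# lossless; decode always yields a flat int32 array, so the caller reshapes it
# back to the (nx, ny) tile shape.
def _example_hcompress1_roundtrip():
    original = np.arange(64, dtype=np.int32).reshape((8, 8))
    codec = HCompress1(scale=0, smooth=False, bytepix=4, nx=8, ny=8)
    compressed = codec.encode(original)
    restored = codec.decode(compressed).reshape(original.shape)
    assert np.array_equal(restored, original)
    return restored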
|
38f9ef28cd8ca6fdc1274dbf1bd47a6bf8064a5110070ba6e5ae08174f18d5f1 | # Licensed under a 3-clause BSD style license
import os
from collections import defaultdict
from setuptools import Extension
from extension_helpers import pkg_config
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
def get_extensions():
cfg = defaultdict(list)
cfg["sources"].extend(
[
os.path.join(SRC_DIR, "compression.c"),
os.path.join(SRC_DIR, "unquantize.c"),
]
)
if int(os.environ.get("ASTROPY_USE_SYSTEM_CFITSIO", 0)) or int(
os.environ.get("ASTROPY_USE_SYSTEM_ALL", 0)
):
for k, v in pkg_config(["cfitsio"], ["cfitsio"]).items():
cfg[k].extend(v)
else:
cfg["sources"].extend(
[
os.path.join("cextern", "cfitsio", "lib", "pliocomp.c"),
os.path.join("cextern", "cfitsio", "lib", "ricecomp.c"),
os.path.join("cextern", "cfitsio", "lib", "fits_hcompress.c"),
os.path.join("cextern", "cfitsio", "lib", "fits_hdecompress.c"),
os.path.join("cextern", "cfitsio", "lib", "quantize.c"),
]
)
cfg["include_dirs"].append(SRC_DIR)
return [Extension("astropy.io.fits._tiled_compression._compression", **cfg)]
|
0f90ad40beb55abf7f3e118ef39fcaa57659806e0ecc4076d85190ff53a78065 | import numpy as np
def _iter_array_tiles(data_shape, tile_shape):
"""
Given an array shape and a tile shape, iterate over the tiles in the array
returning at each iteration the slices for the array.
"""
ndim = len(data_shape)
istart = np.zeros(ndim, dtype=int)
while True:
# In the following, we don't need to special case tiles near the edge
# as Numpy will automatically ignore parts of the slices that are out
# of bounds.
tile_slices = tuple(
[
slice(istart[idx], istart[idx] + tile_shape[idx])
for idx in range(len(istart))
]
)
yield tile_slices
istart[-1] += tile_shape[-1]
for idx in range(ndim - 1, 0, -1):
if istart[idx] >= data_shape[idx]:
istart[idx] = 0
istart[idx - 1] += tile_shape[idx - 1]
if istart[0] >= data_shape[0]:
break
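# Minimal sketch of how the tiling iterator behaves for a 4x5 array split into
# 2x3 tiles: edge tiles simply come back as shorter slices, because Numpy
# silently truncates slice stops that run past the end of an axis.
def _example_iter_array_tiles():
    data = np.arange(20).reshape((4, 5))
    covered = np.zeros(data.shape, dtype=bool)
    for tile_slices in _iter_array_tiles(data.shape, (2, 3)):
        covered[tile_slices] = True
    assert covered.all()  # every element falls inside exactly one tile
    return covered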
|
8d37027655976aeaa1741d4e2123ded92f50c30e360ed8d7365cb442087f3735 | """
This module contains low level helper functions for compressing and
decompressing bytes for the Tiled Table Compression algorithms as specified in
the FITS 4 standard.
"""
import sys
import numpy as np
from astropy.io.fits.hdu.base import BITPIX2DTYPE
from .codecs import PLIO1, Gzip1, Gzip2, HCompress1, Rice1
from .quantization import DITHER_METHODS, QuantizationFailedException, Quantize
from .utils import _iter_array_tiles
ALGORITHMS = {
"GZIP_1": Gzip1,
"GZIP_2": Gzip2,
"RICE_1": Rice1,
"RICE_ONE": Rice1,
"PLIO_1": PLIO1,
"HCOMPRESS_1": HCompress1,
}
DEFAULT_ZBLANK = -2147483648
__all__ = ["compress_hdu", "decompress_hdu"]
def _decompress_tile(buf, *, algorithm: str, **settings):
"""
Decompress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The compressed buffer to be decompressed.
algorithm
A supported decompression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).decode(buf)
def _compress_tile(buf, *, algorithm: str, **settings):
"""
Compress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The decompressed buffer to be compressed.
algorithm
A supported compression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).encode(buf)
def _tile_shape(header):
return tuple(header[f"ZTILE{idx}"] for idx in range(header["ZNAXIS"], 0, -1))
def _data_shape(header):
return tuple(header[f"ZNAXIS{idx}"] for idx in range(header["ZNAXIS"], 0, -1))
def _header_to_settings(header, actual_tile_shape):
settings = {}
if header["ZCMPTYPE"] == "GZIP_2":
settings["itemsize"] = abs(header["ZBITPIX"]) // 8
elif header["ZCMPTYPE"] == "PLIO_1":
# We have to calculate the tilesize from the shape of the tile not the
# header, so that it's correct for edge tiles etc.
settings["tilesize"] = np.product(actual_tile_shape)
elif header["ZCMPTYPE"] in ("RICE_1", "RICE_ONE"):
settings["blocksize"] = _get_compression_setting(header, "BLOCKSIZE", 32)
settings["bytepix"] = _get_compression_setting(header, "BYTEPIX", 4)
settings["tilesize"] = np.product(actual_tile_shape)
elif header["ZCMPTYPE"] == "HCOMPRESS_1":
settings["bytepix"] = 8
settings["scale"] = int(_get_compression_setting(header, "SCALE", 0))
settings["smooth"] = _get_compression_setting(header, "SMOOTH", 0)
# HCOMPRESS requires 2D tiles, so to find the shape of the 2D tile we
# need to ignore all length 1 tile dimensions
# Also cfitsio expects the tile shape in C order
shape_2d = tuple(nd for nd in actual_tile_shape if nd != 1)
if len(shape_2d) != 2:
raise ValueError(f"HCOMPRESS expects two dimensional tiles, got {shape_2d}")
settings["nx"] = shape_2d[0]
settings["ny"] = shape_2d[1]
return settings
def _finalize_array(tile_buffer, *, bitpix, tile_shape, algorithm, lossless):
"""
Convert a buffer to an array.
This is a helper function which takes a raw buffer (as output by .decode)
and translates it into a numpy array with the correct dtype, endianness and
shape.
"""
if algorithm.startswith("GZIP"):
# This algorithm is taken from fitsio
# https://github.com/astropy/astropy/blob/a8cb1668d4835562b89c0d0b3448ac72ca44db63/cextern/cfitsio/lib/imcompress.c#L6345-L6388
tilelen = np.product(tile_shape)
tilebytesize = len(tile_buffer)
if tilebytesize == tilelen * 2:
dtype = ">i2"
elif tilebytesize == tilelen * 4:
if bitpix < 0 and lossless:
dtype = ">f4"
else:
dtype = ">i4"
elif tilebytesize == tilelen * 8:
if bitpix < 0 and lossless:
dtype = ">f8"
else:
dtype = ">i8"
else:
# Just return the raw bytes
dtype = ">u1"
tile_data = np.asarray(tile_buffer).view(dtype).reshape(tile_shape)
else:
# For RICE_1 compression the tiles that are on the edge can end up
# being padded, so we truncate excess values
if algorithm in ("RICE_1", "RICE_ONE", "PLIO_1"):
tile_buffer = tile_buffer[: np.product(tile_shape)]
if tile_buffer.data.format == "b":
# NOTE: this feels like a Numpy bug - need to investigate
tile_data = np.asarray(tile_buffer, dtype=np.uint8).reshape(tile_shape)
else:
tile_data = np.asarray(tile_buffer).reshape(tile_shape)
return tile_data
def _check_compressed_header(header):
# NOTE: this could potentially be moved up into CompImageHDU, e.g. in a
# _verify method.
# Check for overflows which might cause issues when calling C code
for kw in ["ZNAXIS", "ZVAL1", "ZVAL2", "ZBLANK", "BLANK"]:
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.intc).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for i in range(1, header["ZNAXIS"] + 1):
for kw_name in ["ZNAXIS", "ZTILE"]:
kw = f"{kw_name}{i}"
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int32).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for i in range(1, header["NAXIS"] + 1):
kw = f"NAXIS{i}"
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int64).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for kw in ["TNULL1", "PCOUNT", "THEAP"]:
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int64).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for kw in ["ZVAL3"]:
if kw in header:
if header[kw] > np.finfo(np.float32).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
# Validate data types
for kw in ["ZSCALE", "ZZERO", "TZERO1", "TSCAL1"]:
if kw in header:
if not np.isreal(header[kw]):
raise TypeError(f"{kw} should be floating-point")
for kw in ["TTYPE1", "TFORM1", "ZCMPTYPE", "ZNAME1", "ZQUANTIZ"]:
if kw in header:
if not isinstance(header[kw], str):
raise TypeError(f"{kw} should be a string")
for kw in ["ZDITHER0"]:
if kw in header:
if not np.isreal(header[kw]) or not float(header[kw]).is_integer():
raise TypeError(f"{kw} should be an integer")
if "TFORM1" in header:
for valid in ["1PB", "1PI", "1PJ", "1QB", "1QI", "1QJ"]:
if header["TFORM1"].startswith(valid):
break
else:
raise RuntimeError(f"Invalid TFORM1: {header['TFORM1']}")
# Check values
for kw in ["TFIELDS", "PCOUNT"] + [
f"NAXIS{idx + 1}" for idx in range(header["NAXIS"])
]:
if kw in header:
if header[kw] < 0:
raise ValueError(f"{kw} should not be negative.")
for kw in ["ZNAXIS", "TFIELDS"]:
if kw in header:
if header[kw] < 0 or header[kw] > 999:
raise ValueError(f"{kw} should be in the range 0 to 999")
if header["ZBITPIX"] not in [8, 16, 32, 64, -32, -64]:
raise ValueError(f"Invalid value for BITPIX: {header['ZBITPIX']}")
if header["ZCMPTYPE"] not in ALGORITHMS:
raise ValueError(f"Unrecognized compression type: {header['ZCMPTYPE']}")
# Check that certain keys are present
header["ZNAXIS"]
header["ZBITPIX"]
def _get_compression_setting(header, name, default):
# Settings for the various compression algorithms are stored in pairs of
# keywords called ZNAME? and ZVAL? - a given compression setting could be
# in any ZNAME? so we need to check through all the possible ZNAMEs which
# one matches the required setting.
for i in range(1, 1000):
if f"ZNAME{i}" not in header:
break
if header[f"ZNAME{i}"].lower() == name.lower():
return header[f"ZVAL{i}"]
return default
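# Minimal sketch of the settings lookup above. A plain dict stands in for an
# astropy.io.fits.Header here, since only key lookups are needed; the values
# are arbitrary and chosen purely for illustration.
def _example_compression_settings():
    header = {"ZCMPTYPE": "RICE_1", "ZNAME1": "BLOCKSIZE", "ZVAL1": 16}
    assert _get_compression_setting(header, "blocksize", 32) == 16
    # BYTEPIX is not present in any ZNAMEn, so the default is returned.
    assert _get_compression_setting(header, "bytepix", 4) == 4
    # For a full 8x8 tile these become the keyword arguments passed to Rice1.
    settings = _header_to_settings(header, (8, 8))
    assert settings["blocksize"] == 16 and settings["tilesize"] == 64
    return settings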
def decompress_hdu(hdu):
"""
Decompress the data in a `~astropy.io.fits.CompImageHDU`.
Parameters
----------
hdu : `astropy.io.fits.CompImageHDU`
Input HDU to decompress the data for.
Returns
-------
data : `numpy.ndarray`
The decompressed data array.
"""
_check_compressed_header(hdu._header)
tile_shape = _tile_shape(hdu._header)
data_shape = _data_shape(hdu._header)
data = np.zeros(data_shape, dtype=BITPIX2DTYPE[hdu._header["ZBITPIX"]])
quantized = "ZSCALE" in hdu.compressed_data.dtype.names
if len(hdu.compressed_data) == 0:
return None
override_itemsize = None
for irow, tile_slices in enumerate(_iter_array_tiles(data_shape, tile_shape)):
row = hdu.compressed_data[irow]
# For tiles near the edge, the tile shape from the header might not be
# correct so we have to pass the shape manually.
actual_tile_shape = data[tile_slices].shape
settings = _header_to_settings(hdu._header, actual_tile_shape)
cdata = row["COMPRESSED_DATA"]
# When quantizing floating point data, sometimes the data will not
# quantize efficiently. In these cases the raw floating point data can
# be losslessly GZIP compressed and stored in the `GZIP_COMPRESSED_DATA`
# column.
gzip_fallback = len(cdata) == 0
if gzip_fallback:
tile_buffer = _decompress_tile(
row["GZIP_COMPRESSED_DATA"], algorithm="GZIP_1"
)
tile_data = _finalize_array(
tile_buffer,
bitpix=hdu._header["ZBITPIX"],
tile_shape=actual_tile_shape,
algorithm="GZIP_1",
lossless=True,
)
else:
if hdu._header["ZCMPTYPE"] == "GZIP_2":
# Decompress with GZIP_1 just to find the total number of
# elements in the uncompressed data. We just need to do this once
# as this will be the same for all tiles.
if override_itemsize is None:
tile_data = np.asarray(_decompress_tile(cdata, algorithm="GZIP_1"))
override_itemsize = tile_data.size // int(
np.product(actual_tile_shape)
)
settings["itemsize"] = override_itemsize
tile_buffer = _decompress_tile(
cdata, algorithm=hdu._header["ZCMPTYPE"], **settings
)
tile_data = _finalize_array(
tile_buffer,
bitpix=hdu._header["ZBITPIX"],
tile_shape=actual_tile_shape,
algorithm=hdu._header["ZCMPTYPE"],
lossless=not quantized,
)
if "ZBLANK" in row.array.names:
zblank = row["ZBLANK"]
else:
zblank = hdu._header.get("ZBLANK", None)
if zblank is not None:
blank_mask = tile_data == zblank
if quantized:
dither_method = DITHER_METHODS[hdu._header.get("ZQUANTIZ", "NO_DITHER")]
dither_seed = hdu._header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=None,
bitpix=hdu._header["ZBITPIX"],
)
tile_data = np.asarray(
q.decode_quantized(tile_data, row["ZSCALE"], row["ZZERO"])
).reshape(actual_tile_shape)
if zblank is not None:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
tile_data[blank_mask] = np.nan
data[tile_slices] = tile_data
return data
def compress_hdu(hdu):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
    The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
hdu : `astropy.io.fits.CompImageHDU`
Input HDU to compress the data for.
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
if not isinstance(hdu.data, np.ndarray):
raise TypeError("CompImageHDU.data must be a numpy.ndarray")
_check_compressed_header(hdu._header)
# TODO: This implementation is memory inefficient as it generates all the
# compressed bytes before forming them into the heap, leading to 2x the
# potential memory usage. Directly storing the compressed bytes into an
# expanding heap would fix this.
tile_shape = _tile_shape(hdu._header)
data_shape = _data_shape(hdu._header)
compressed_bytes = []
gzip_fallback = []
scales = []
zeros = []
zblank = None
noisebit = _get_compression_setting(hdu._header, "noisebit", 0)
for irow, tile_slices in enumerate(_iter_array_tiles(data_shape, tile_shape)):
data = hdu.data[tile_slices]
settings = _header_to_settings(hdu._header, data.shape)
quantize = "ZSCALE" in hdu.columns.dtype.names
if data.dtype.kind == "f" and quantize:
dither_method = DITHER_METHODS[hdu._header.get("ZQUANTIZ", "NO_DITHER")]
dither_seed = hdu._header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=noisebit,
bitpix=hdu._header["ZBITPIX"],
)
original_shape = data.shape
# If there are any NaN values in the data, we should reset them to
# a value that will not affect the quantization (an already existing
# data value in the array) and we can then reset this after quantization
# to ZBLANK and set the appropriate header keyword
nan_mask = np.isnan(data)
any_nan = np.any(nan_mask)
if any_nan:
# Note that we need to copy here to avoid modifying the input array.
data = data.copy()
if np.all(nan_mask):
data[nan_mask] = 0
else:
data[nan_mask] = np.nanmin(data)
try:
data, scale, zero = q.encode_quantized(data)
except QuantizationFailedException:
if any_nan:
# reset NaN values since we will losslessly compress.
data[nan_mask] = np.nan
scales.append(0)
zeros.append(0)
gzip_fallback.append(True)
else:
data = np.asarray(data).reshape(original_shape)
if any_nan:
if not data.flags.writeable:
data = data.copy()
# For now, we just use the default ZBLANK value and assume
# this is the same for all tiles. We could generalize this
# to allow different ZBLANK values (for example if the data
# includes this value by chance) and to allow different values
# per tile, which is allowed by the FITS standard.
data[nan_mask] = DEFAULT_ZBLANK
zblank = DEFAULT_ZBLANK
scales.append(scale)
zeros.append(zero)
gzip_fallback.append(False)
else:
scales.append(0)
zeros.append(0)
gzip_fallback.append(False)
# The original compress_hdu assumed the data was in native endian, so we
# change this here:
if hdu._header["ZCMPTYPE"].startswith("GZIP") or gzip_fallback[-1]:
# This is apparently needed so that our heap data agrees with
# the C implementation!?
data = data.astype(data.dtype.newbyteorder(">"))
else:
if not data.dtype.isnative:
data = data.astype(data.dtype.newbyteorder("="))
if gzip_fallback[-1]:
cbytes = _compress_tile(data, algorithm="GZIP_1")
else:
cbytes = _compress_tile(data, algorithm=hdu._header["ZCMPTYPE"], **settings)
compressed_bytes.append(cbytes)
if zblank is not None:
hdu._header["ZBLANK"] = zblank
table = np.zeros(len(compressed_bytes), dtype=hdu.columns.dtype.newbyteorder(">"))
if "ZSCALE" in table.dtype.names:
table["ZSCALE"] = np.array(scales)
table["ZZERO"] = np.array(zeros)
for irow, cbytes in enumerate(compressed_bytes):
table["COMPRESSED_DATA"][irow, 0] = len(cbytes)
table["COMPRESSED_DATA"][:1, 1] = 0
table["COMPRESSED_DATA"][1:, 1] = np.cumsum(table["COMPRESSED_DATA"][:-1, 0])
for irow in range(len(compressed_bytes)):
if gzip_fallback[irow]:
table["GZIP_COMPRESSED_DATA"][irow] = table["COMPRESSED_DATA"][irow]
table["COMPRESSED_DATA"][irow] = 0
# For PLIO_1, the size of each heap element is a factor of two lower than
    # the real size - not clear if this is deliberate or a bug somewhere.
if hdu._header["ZCMPTYPE"] == "PLIO_1":
table["COMPRESSED_DATA"][:, 0] //= 2
# For PLIO_1, it looks like the compressed data is always stored big endian
if hdu._header["ZCMPTYPE"] == "PLIO_1":
for irow in range(len(compressed_bytes)):
if not gzip_fallback[irow]:
array = np.frombuffer(compressed_bytes[irow], dtype="i2")
if array.dtype.byteorder == "<" or (
array.dtype.byteorder == "=" and sys.byteorder == "little"
):
compressed_bytes[irow] = array.astype(">i2").tobytes()
compressed_bytes = b"".join(compressed_bytes)
table_bytes = table.tobytes()
if len(table_bytes) != hdu._theap:
raise Exception(
f"Unexpected compressed table size (expected {hdu._theap}, got {len(table_bytes)})"
)
heap = table.tobytes() + compressed_bytes
return len(compressed_bytes), np.frombuffer(heap, dtype=np.uint8)
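# Minimal sketch of how compress_hdu/decompress_hdu are exercised in practice,
# via the public CompImageHDU interface rather than by calling them directly.
# The file path is a throwaway placeholder; integer data keep the RICE_1 round
# trip lossless.
def _example_compimagehdu_roundtrip(path="/tmp/compressed_example.fits"):
    from astropy.io import fits

    data = np.arange(100, dtype=np.int32).reshape((10, 10))
    fits.CompImageHDU(data=data, compression_type="RICE_1").writeto(
        path, overwrite=True
    )
    with fits.open(path) as hdul:
        restored = hdul[1].data  # decompressed on access
        assert np.array_equal(restored, data)
    return restored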
|
c39c3cf21d48fa1b4b5ab8fecf5a5878be7b8f81aed36f1715a9b62d3d983e31 | """
This file contains the code for Quantizing / Dequantizing floats.
"""
import numpy as np
from astropy.io.fits._tiled_compression._compression import (
quantize_double_c,
quantize_float_c,
unquantize_double_c,
unquantize_float_c,
)
from astropy.io.fits.hdu.base import BITPIX2DTYPE
__all__ = ["Quantize"]
DITHER_METHODS = {"NO_DITHER": -1, "SUBTRACTIVE_DITHER_1": 1, "SUBTRACTIVE_DITHER_2": 2}
class QuantizationFailedException(Exception):
pass
class Quantize:
"""
Quantization of floating-point data following the FITS standard.
"""
def __init__(
self, *, row: int, dither_method: int, quantize_level: int, bitpix: int
):
super().__init__()
self.row = row
# TODO: pass dither method as a string instead of int?
self.quantize_level = quantize_level
self.dither_method = dither_method
self.bitpix = bitpix
# NOTE: below we use decode_quantized and encode_quantized instead of
# decode and encode as we need to break with the numcodec API and take/return
# scale and zero in addition to quantized value. We should figure out how
# to properly use the numcodec API for this use case.
def decode_quantized(self, buf, scale, zero):
"""
Unquantize data.
Parameters
----------
buf : bytes or array_like
The buffer to unquantize.
Returns
-------
np.ndarray
The unquantized buffer.
"""
qbytes = np.asarray(buf)
qbytes = qbytes.astype(qbytes.dtype.newbyteorder("="))
# TODO: figure out if we need to support null checking
if self.dither_method == -1:
# For NO_DITHER we should just use the scale and zero directly
return qbytes * scale + zero
if self.bitpix == -32:
ubytes = unquantize_float_c(
qbytes.tobytes(),
self.row,
qbytes.size,
scale,
zero,
self.dither_method,
0,
0,
0.0,
qbytes.dtype.itemsize,
)
elif self.bitpix == -64:
ubytes = unquantize_double_c(
qbytes.tobytes(),
self.row,
qbytes.size,
scale,
zero,
self.dither_method,
0,
0,
0.0,
qbytes.dtype.itemsize,
)
else:
raise TypeError("bitpix should be one of -32 or -64")
return np.frombuffer(ubytes, dtype=BITPIX2DTYPE[self.bitpix]).data
def encode_quantized(self, buf):
"""
Quantize data.
Parameters
----------
buf : bytes or array_like
The buffer to quantize.
Returns
-------
np.ndarray
A buffer with quantized data.
"""
uarray = np.asarray(buf)
uarray = uarray.astype(uarray.dtype.newbyteorder("="))
# TODO: figure out if we need to support null checking
if uarray.dtype.itemsize == 4:
qbytes, status, scale, zero = quantize_float_c(
uarray.tobytes(),
self.row,
uarray.size,
1,
0,
0,
self.quantize_level,
self.dither_method,
)[:4]
elif uarray.dtype.itemsize == 8:
qbytes, status, scale, zero = quantize_double_c(
uarray.tobytes(),
self.row,
uarray.size,
1,
0,
0,
self.quantize_level,
self.dither_method,
)[:4]
if status == 0:
raise QuantizationFailedException()
else:
return np.frombuffer(qbytes, dtype=np.int32), scale, zero
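# Minimal sketch of a quantize/dequantize round trip, assuming the compiled
# _compression extension is available. Quantization is lossy, so the result is
# only approximate; for nearly constant tiles the C routines may refuse to
# quantize, in which case QuantizationFailedException is raised and the caller
# is expected to fall back to lossless GZIP_1 (see compress_hdu).
def _example_quantize_roundtrip():
    rng = np.random.default_rng(0)
    tile = rng.normal(size=256).astype(np.float32)
    q = Quantize(row=1, dither_method=1, quantize_level=16, bitpix=-32)
    quantized, scale, zero = q.encode_quantized(tile)
    restored = np.asarray(q.decode_quantized(quantized, scale, zero))
    assert restored.shape == tile.shape
    return restored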
|
651ac114c8e131fd4cdf25f45ab51b0921a3073dc22cc9746f7b7af69d7c43b9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsheader`` is a command line script based on astropy.io.fits for printing
the header(s) of one or more FITS file(s) to the standard output in a human-
readable format.
Example uses of fitsheader:
1. Print the header of all the HDUs of a .fits file::
$ fitsheader filename.fits
2. Print the header of the third and fifth HDU extension::
$ fitsheader --extension 3 --extension 5 filename.fits
3. Print the header of a named extension, e.g. select the HDU containing
keywords EXTNAME='SCI' and EXTVER='2'::
$ fitsheader --extension "SCI,2" filename.fits
4. Print only specific keywords::
$ fitsheader --keyword BITPIX --keyword NAXIS filename.fits
5. Print keywords NAXIS, NAXIS1, NAXIS2, etc using a wildcard::
$ fitsheader --keyword NAXIS* filename.fits
6. Dump the header keywords of all the files in the current directory into a
machine-readable csv file::
$ fitsheader --table ascii.csv *.fits > keywords.csv
7. Specify hierarchical keywords with the dotted or spaced notation::
$ fitsheader --keyword ESO.INS.ID filename.fits
$ fitsheader --keyword "ESO INS ID" filename.fits
8. Compare the headers of different fits files, following ESO's ``fitsort``
format::
$ fitsheader --fitsort --extension 0 --keyword ESO.INS.ID *.fits
9. Same as above, sorting the output along a specified keyword::
$ fitsheader -f -s DATE-OBS -e 0 -k DATE-OBS -k ESO.INS.ID *.fits
10. Sort first by OBJECT, then DATE-OBS::
$ fitsheader -f -s OBJECT -s DATE-OBS *.fits
Note that compressed images (HDUs of type
:class:`~astropy.io.fits.CompImageHDU`) really have two headers: a real
BINTABLE header to describe the compressed data, and a fake IMAGE header
representing the image that was compressed. Astropy returns the latter by
default. You must supply the ``--compressed`` option if you require the real
header that describes the compression.
With Astropy installed, please run ``fitsheader --help`` to see the full usage
documentation.
"""
import argparse
import sys
import numpy as np
from astropy import __version__, log
from astropy.io import fits
DESCRIPTION = """
Print the header(s) of a FITS file. Optional arguments allow the desired
extension(s), keyword(s), and output format to be specified.
Note that in the case of a compressed image, the decompressed header is
shown by default.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitsheader
for further documentation.
""".strip()
class ExtensionNotFoundException(Exception):
"""Raised if an HDU extension requested by the user does not exist."""
pass
class HeaderFormatter:
"""Class to format the header(s) of a FITS file for display by the
`fitsheader` tool; essentially a wrapper around a `HDUList` object.
Example usage:
fmt = HeaderFormatter('/path/to/file.fits')
print(fmt.parse(extensions=[0, 3], keywords=['NAXIS', 'BITPIX']))
Parameters
----------
filename : str
Path to a single FITS file.
verbose : bool
Verbose flag, to show more information about missing extensions,
keywords, etc.
Raises
------
OSError
If `filename` does not exist or cannot be read.
"""
def __init__(self, filename, verbose=True):
self.filename = filename
self.verbose = verbose
self._hdulist = fits.open(filename)
def parse(self, extensions=None, keywords=None, compressed=False):
"""Returns the FITS file header(s) in a readable format.
Parameters
----------
extensions : list of int or str, optional
Format only specific HDU(s), identified by number or name.
The name can be composed of the "EXTNAME" or "EXTNAME,EXTVER"
keywords.
keywords : list of str, optional
Keywords for which the value(s) should be returned.
If not specified, then the entire header is returned.
compressed : bool, optional
If True, shows the header describing the compression, rather than
the header obtained after decompression. (Affects FITS files
containing `CompImageHDU` extensions only.)
Returns
-------
formatted_header : str or astropy.table.Table
Traditional 80-char wide format in the case of `HeaderFormatter`;
an Astropy Table object in the case of `TableHeaderFormatter`.
"""
# `hdukeys` will hold the keys of the HDUList items to display
if extensions is None:
hdukeys = range(len(self._hdulist)) # Display all by default
else:
hdukeys = []
for ext in extensions:
try:
# HDU may be specified by number
hdukeys.append(int(ext))
except ValueError:
# The user can specify "EXTNAME" or "EXTNAME,EXTVER"
parts = ext.split(",")
if len(parts) > 1:
extname = ",".join(parts[0:-1])
extver = int(parts[-1])
hdukeys.append((extname, extver))
else:
hdukeys.append(ext)
# Having established which HDUs the user wants, we now format these:
return self._parse_internal(hdukeys, keywords, compressed)
def _parse_internal(self, hdukeys, keywords, compressed):
"""The meat of the formatting; in a separate method to allow overriding."""
result = []
for idx, hdu in enumerate(hdukeys):
try:
cards = self._get_cards(hdu, keywords, compressed)
except ExtensionNotFoundException:
continue
if idx > 0: # Separate HDUs by a blank line
result.append("\n")
result.append(f"# HDU {hdu} in {self.filename}:\n")
for c in cards:
result.append(f"{c}\n")
return "".join(result)
def _get_cards(self, hdukey, keywords, compressed):
"""Returns a list of `astropy.io.fits.card.Card` objects.
This function will return the desired header cards, taking into
account the user's preference to see the compressed or uncompressed
version.
Parameters
----------
hdukey : int or str
Key of a single HDU in the HDUList.
keywords : list of str, optional
Keywords for which the cards should be returned.
compressed : bool, optional
If True, shows the header describing the compression.
Raises
------
ExtensionNotFoundException
If the hdukey does not correspond to an extension.
"""
# First we obtain the desired header
try:
if compressed:
# In the case of a compressed image, return the header before
# decompression (not the default behavior)
header = self._hdulist[hdukey]._header
else:
header = self._hdulist[hdukey].header
except (IndexError, KeyError):
message = f"{self.filename}: Extension {hdukey} not found."
if self.verbose:
log.warning(message)
raise ExtensionNotFoundException(message)
if not keywords: # return all cards
cards = header.cards
else: # specific keywords are requested
cards = []
for kw in keywords:
try:
crd = header.cards[kw]
if isinstance(crd, fits.card.Card): # Single card
cards.append(crd)
else: # Allow for wildcard access
cards.extend(crd)
except KeyError: # Keyword does not exist
if self.verbose:
log.warning(
f"{self.filename} (HDU {hdukey}): Keyword {kw} not found."
)
return cards
def close(self):
self._hdulist.close()
class TableHeaderFormatter(HeaderFormatter):
"""Class to convert the header(s) of a FITS file into a Table object.
The table returned by the `parse` method will contain four columns:
filename, hdu, keyword, and value.
Subclassed from HeaderFormatter, which contains the meat of the formatting.
"""
def _parse_internal(self, hdukeys, keywords, compressed):
"""Method called by the parse method in the parent class."""
tablerows = []
for hdu in hdukeys:
try:
for card in self._get_cards(hdu, keywords, compressed):
tablerows.append(
{
"filename": self.filename,
"hdu": hdu,
"keyword": card.keyword,
"value": str(card.value),
}
)
except ExtensionNotFoundException:
pass
if tablerows:
from astropy import table
return table.Table(tablerows)
return None
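# Minimal sketch of driving the formatters programmatically instead of through
# the command line. The FITS file is generated on the fly and the path is a
# throwaway placeholder.
def _example_table_header_formatter(path="/tmp/fitsheader_example.fits"):
    fits.PrimaryHDU(data=np.zeros((2, 2))).writeto(path, overwrite=True)
    formatter = TableHeaderFormatter(path)
    try:
        # Returns an astropy Table with filename/hdu/keyword/value columns.
        return formatter.parse(keywords=["NAXIS", "BITPIX"])
    finally:
        formatter.close()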
def print_headers_traditional(args):
"""Prints FITS header(s) using the traditional 80-char format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
for idx, filename in enumerate(args.filename): # support wildcards
if idx > 0 and not args.keyword:
print() # print a newline between different files
formatter = None
try:
formatter = HeaderFormatter(filename)
print(
formatter.parse(args.extensions, args.keyword, args.compressed), end=""
)
except OSError as e:
log.error(str(e))
finally:
if formatter:
formatter.close()
def print_headers_as_table(args):
"""Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename)
tbl = formatter.parse(args.extensions, args.keyword, args.compressed)
if tbl:
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
from astropy import table
resulting_table = table.vstack(tables)
# Print the string representation of the concatenated table
resulting_table.write(sys.stdout, format=args.table)
def print_headers_as_comparison(args):
"""Prints FITS header(s) with keywords as columns.
This follows the dfits+fitsort format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
from astropy import table
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename, verbose=False)
tbl = formatter.parse(args.extensions, args.keyword, args.compressed)
if tbl:
# Remove empty keywords
tbl = tbl[np.where(tbl["keyword"] != "")]
else:
tbl = table.Table([[filename]], names=("filename",))
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
resulting_table = table.vstack(tables)
# If we obtained more than one hdu, merge hdu and keywords columns
hdus = resulting_table["hdu"]
if np.ma.isMaskedArray(hdus):
hdus = hdus.compressed()
if len(np.unique(hdus)) > 1:
for tab in tables:
new_column = table.Column([f"{row['hdu']}:{row['keyword']}" for row in tab])
tab.add_column(new_column, name="hdu+keyword")
keyword_column_name = "hdu+keyword"
else:
keyword_column_name = "keyword"
# Check how many hdus we are processing
final_tables = []
for tab in tables:
final_table = [table.Column([tab["filename"][0]], name="filename")]
if "value" in tab.colnames:
for row in tab:
if row["keyword"] in ("COMMENT", "HISTORY"):
continue
final_table.append(
table.Column([row["value"]], name=row[keyword_column_name])
)
final_tables.append(table.Table(final_table))
final_table = table.vstack(final_tables)
# Sort if requested
if args.sort:
final_table.sort(args.sort)
# Reorganise to keyword by columns
final_table.pprint(max_lines=-1, max_width=-1)
def main(args=None):
"""This is the main function called by the `fitsheader` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"-e",
"--extension",
metavar="HDU",
action="append",
dest="extensions",
help=(
"specify the extension by name or number; "
"this argument can be repeated "
"to select multiple extensions"
),
)
parser.add_argument(
"-k",
"--keyword",
metavar="KEYWORD",
action="append",
type=str,
help=(
"specify a keyword; this argument can be "
"repeated to select multiple keywords; "
"also supports wildcards"
),
)
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument(
"-t",
"--table",
nargs="?",
default=False,
metavar="FORMAT",
help=(
"print the header(s) in machine-readable table "
"format; the default format is "
'"ascii.fixed_width" (can be "ascii.csv", '
'"ascii.html", "ascii.latex", "fits", etc)'
),
)
mode_group.add_argument(
"-f",
"--fitsort",
action="store_true",
help=(
"print the headers as a table with each unique "
"keyword in a given column (fitsort format) "
),
)
parser.add_argument(
"-s",
"--sort",
metavar="SORT_KEYWORD",
action="append",
type=str,
help=(
"sort output by the specified header keywords, "
"can be repeated to sort by multiple keywords; "
"Only supported with -f/--fitsort"
),
)
parser.add_argument(
"-c",
"--compressed",
action="store_true",
help=(
"for compressed image data, "
"show the true header which describes "
"the compression rather than the data"
),
)
parser.add_argument(
"filename",
nargs="+",
help="path to one or more files; wildcards are supported",
)
args = parser.parse_args(args)
# If `--table` was used but no format specified,
# then use ascii.fixed_width by default
if args.table is None:
args.table = "ascii.fixed_width"
if args.sort:
args.sort = [key.replace(".", " ") for key in args.sort]
if not args.fitsort:
log.error(
"Sorting with -s/--sort is only supported in conjunction with"
" -f/--fitsort"
)
# 2: Unix error convention for command line syntax
sys.exit(2)
if args.keyword:
args.keyword = [key.replace(".", " ") for key in args.keyword]
# Now print the desired headers
try:
if args.table:
print_headers_as_table(args)
elif args.fitsort:
print_headers_as_comparison(args)
else:
print_headers_traditional(args)
except OSError:
# A 'Broken pipe' OSError may occur when stdout is closed prematurely,
# eg. when calling `fitsheader file.fits | head`. We let this pass.
pass
|
3409caaa742c8846e972a93a1c0e6cb9c0b63d00d4b9d1f766179cec927f313a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitscheck`` is a command line script based on astropy.io.fits for verifying
and updating the CHECKSUM and DATASUM keywords of .fits files. ``fitscheck``
can also detect and often fix other FITS standards violations. ``fitscheck``
facilitates re-writing the non-standard checksums originally generated by
astropy.io.fits with standard checksums which will interoperate with CFITSIO.
``fitscheck`` will refuse to write new checksums if the checksum keywords are
missing or their values are bad. Use ``--force`` to write new checksums
regardless of whether or not they currently exist or pass. Use
``--ignore-missing`` to tolerate missing checksum keywords without comment.
Example uses of fitscheck:
1. Add checksums::
$ fitscheck --write *.fits
2. Write new checksums, even if existing checksums are bad or missing::
$ fitscheck --write --force *.fits
3. Verify standard checksums and FITS compliance without changing the files::
$ fitscheck --compliance *.fits
4. Only check and fix compliance problems, ignoring checksums::
$ fitscheck --checksum none --compliance --write *.fits
5. Verify standard interoperable checksums::
$ fitscheck *.fits
6. Delete checksum keywords::
$ fitscheck --checksum remove --write *.fits
"""
import argparse
import logging
import sys
import warnings
from astropy import __version__
from astropy.io import fits
log = logging.getLogger("fitscheck")
DESCRIPTION = """
e.g. fitscheck example.fits
Verifies and optionally re-writes the CHECKSUM and DATASUM keywords
for a .fits file.
Optionally detects and fixes FITS standard compliance problems.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitscheck
for further documentation.
""".strip()
def handle_options(args):
if not len(args):
args = ["-h"]
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"fits_files", metavar="file", nargs="+", help=".fits files to process."
)
parser.add_argument(
"-k",
"--checksum",
dest="checksum_kind",
choices=["standard", "remove", "none"],
        help="Choose FITS checksum mode or none. Defaults to standard.",
default="standard",
)
parser.add_argument(
"-w",
"--write",
dest="write_file",
help="Write out file checksums and/or FITS compliance fixes.",
default=False,
action="store_true",
)
parser.add_argument(
"-f",
"--force",
dest="force",
help="Do file update even if original checksum was bad.",
default=False,
action="store_true",
)
parser.add_argument(
"-c",
"--compliance",
dest="compliance",
help="Do FITS compliance checking; fix if possible.",
default=False,
action="store_true",
)
parser.add_argument(
"-i",
"--ignore-missing",
dest="ignore_missing",
help="Ignore missing checksums.",
default=False,
action="store_true",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
help="Generate extra output.",
default=False,
action="store_true",
)
global OPTIONS
OPTIONS = parser.parse_args(args)
if OPTIONS.checksum_kind == "none":
OPTIONS.checksum_kind = False
elif OPTIONS.checksum_kind == "standard":
OPTIONS.checksum_kind = True
elif OPTIONS.checksum_kind == "remove":
OPTIONS.write_file = True
OPTIONS.force = True
return OPTIONS.fits_files
def setup_logging():
log.handlers.clear()
if OPTIONS.verbose:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.WARNING)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(message)s"))
log.addHandler(handler)
def verify_checksums(filename):
"""
Prints a message if any HDU in `filename` has a bad checksum or datasum.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter("always")
with fits.open(filename, checksum=OPTIONS.checksum_kind) as hdulist:
for i, hdu in enumerate(hdulist):
# looping on HDUs is needed to read them and verify the
# checksums
if not OPTIONS.ignore_missing:
if not hdu._checksum:
log.warning(
f"MISSING {filename!r} .. Checksum not found in HDU #{i}"
)
return 1
if not hdu._datasum:
log.warning(
f"MISSING {filename!r} .. Datasum not found in HDU #{i}"
)
return 1
for w in wlist:
if str(w.message).startswith(
("Checksum verification failed", "Datasum verification failed")
):
log.warning("BAD %r %s", filename, str(w.message))
return 1
log.info(f"OK {filename!r}")
return 0
def verify_compliance(filename):
"""Check for FITS standard compliance."""
with fits.open(filename) as hdulist:
try:
hdulist.verify("exception")
except fits.VerifyError as exc:
log.warning("NONCOMPLIANT %r .. %s", filename, str(exc).replace("\n", " "))
return 1
return 0
def update(filename):
"""
Sets the ``CHECKSUM`` and ``DATASUM`` keywords for each HDU of `filename`.
    Also fixes standards violations if possible and requested.
"""
output_verify = "silentfix" if OPTIONS.compliance else "ignore"
# For unit tests we reset temporarily the warning filters. Indeed, before
# updating the checksums, fits.open will verify the existing checksums and
# raise warnings, which are later caught and converted to log.warning...
# which is an issue when testing, using the "error" action to convert
# warnings to exceptions.
with warnings.catch_warnings():
warnings.resetwarnings()
with fits.open(
filename,
do_not_scale_image_data=True,
checksum=OPTIONS.checksum_kind,
mode="update",
) as hdulist:
hdulist.flush(output_verify=output_verify)
def process_file(filename):
"""
Handle a single .fits file, returning the count of checksum and compliance
errors.
"""
try:
checksum_errors = verify_checksums(filename)
if OPTIONS.compliance:
compliance_errors = verify_compliance(filename)
else:
compliance_errors = 0
if OPTIONS.write_file and checksum_errors == 0 or OPTIONS.force:
update(filename)
return checksum_errors + compliance_errors
except Exception as e:
log.error(f"EXCEPTION {filename!r} .. {e}")
return 1
def main(args=None):
"""
Processes command line parameters into options and files, then checks
or update FITS DATASUM and CHECKSUM keywords for the specified files.
"""
errors = 0
fits_files = handle_options(args or sys.argv[1:])
setup_logging()
for filename in fits_files:
errors += process_file(filename)
if errors:
log.warning(f"{errors} errors")
return int(bool(errors))
|
445a7cd2a9f126803e6c26b3363b428721b430e5f7969339ec295a01e84a0c3c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsinfo`` is a command-line script based on astropy.io.fits for
printing a summary of the HDUs in one or more FITS files(s) to the
standard output.
Example usage of ``fitsinfo``:
1. Print a summary of the HDUs in a FITS file::
$ fitsinfo filename.fits
Filename: filename.fits
No. Name Type Cards Dimensions Format
0 PRIMARY PrimaryHDU 138 ()
1 SCI ImageHDU 61 (800, 800) int16
2 SCI ImageHDU 61 (800, 800) int16
3 SCI ImageHDU 61 (800, 800) int16
4 SCI ImageHDU 61 (800, 800) int16
2. Print a summary of HDUs of all the FITS files in the current directory::
$ fitsinfo *.fits
"""
import argparse
import astropy.io.fits as fits
from astropy import __version__, log
DESCRIPTION = """
Print a summary of the HDUs in a FITS file(s).
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitsinfo
for further documentation.
""".strip()
def fitsinfo(filename):
"""
Print a summary of the HDUs in a FITS file.
Parameters
----------
filename : str
The path to a FITS file.
"""
try:
fits.info(filename)
except OSError as e:
log.error(str(e))
return
def main(args=None):
"""The main function called by the `fitsinfo` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"filename",
nargs="+",
help="Path to one or more FITS files. Wildcards are supported.",
)
args = parser.parse_args(args)
for idx, filename in enumerate(args.filename):
if idx > 0:
print()
fitsinfo(filename)
|
182d255a3475ef85f3f575ed3ce50877d944e87624ba7cc6e7da71c12eac003b | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import sys
import numpy as np
from astropy.io.fits.column import FITS2NUMPY, ColDefs, Column
from astropy.io.fits.fitsrec import FITS_rec, FITS_record
from astropy.io.fits.util import _is_int, _is_pseudo_integer, _pseudo_zero
from astropy.utils import lazyproperty
from .base import DELAYED, DTYPE2BITPIX
from .image import PrimaryHDU
from .table import _TableLikeHDU
class Group(FITS_record):
"""
One group of the random group data.
"""
def __init__(self, input, row=0, start=None, end=None, step=None, base=None):
super().__init__(input, row, start, end, step, base)
@property
def parnames(self):
return self.array.parnames
@property
def data(self):
# The last column in the coldefs is the data portion of the group
return self.field(self.array._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter value.
"""
if _is_int(parname):
result = self.array[self.row][parname]
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.array[self.row][indx[0]]
            # if more than one group parameter has the same name
else:
result = self.array[self.row][indx[0]].astype("f8")
for i in indx[1:]:
result += self.array[self.row][i]
return result
def setpar(self, parname, value):
"""
Set the group parameter value.
"""
# TODO: It would be nice if, instead of requiring a multi-part value to
# be an array, there were an *option* to automatically split the value
# into multiple columns if it doesn't already fit in the array data
# type.
if _is_int(parname):
self.array[self.row][parname] = value
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
self.array[self.row][indx[0]] = value
            # if more than one group parameter has the same name, the
# value must be a list (or tuple) containing arrays
else:
if isinstance(value, (list, tuple)) and len(indx) == len(value):
for i in range(len(indx)):
self.array[self.row][indx[i]] = value[i]
else:
raise ValueError(
"Parameter value must be a sequence with "
"{} arrays/numbers.".format(len(indx))
)
class GroupData(FITS_rec):
"""
Random groups data object.
Allows structured access to FITS Group data in a manner analogous
to tables.
"""
_record_type = Group
def __new__(
cls,
input=None,
bitpix=None,
pardata=None,
parnames=[],
bscale=None,
bzero=None,
parbscales=None,
parbzeros=None,
):
"""
Parameters
----------
input : array or FITS_rec instance
input data, either the group data itself (a
`numpy.ndarray`) or a record array (`FITS_rec`) which will
contain both group parameter info and the data. The rest
of the arguments are used only for the first case.
bitpix : int
data type as expressed in FITS ``BITPIX`` value (8, 16, 32,
64, -32, or -64)
pardata : sequence of array
parameter data, as a list of (numeric) arrays.
parnames : sequence of str
list of parameter names.
bscale : int
``BSCALE`` of the data
bzero : int
``BZERO`` of the data
parbscales : sequence of int
list of bscales for the parameters
parbzeros : sequence of int
list of bzeros for the parameters
"""
if not isinstance(input, FITS_rec):
if pardata is None:
npars = 0
else:
npars = len(pardata)
if parbscales is None:
parbscales = [None] * npars
if parbzeros is None:
parbzeros = [None] * npars
if parnames is None:
parnames = [f"PAR{idx + 1}" for idx in range(npars)]
if len(parnames) != npars:
raise ValueError(
"The number of parameter data arrays does "
"not match the number of parameters."
)
unique_parnames = _unique_parnames(parnames + ["DATA"])
if bitpix is None:
bitpix = DTYPE2BITPIX[input.dtype.name]
fits_fmt = GroupsHDU._bitpix2tform[bitpix] # -32 -> 'E'
format = FITS2NUMPY[fits_fmt] # 'E' -> 'f4'
data_fmt = f"{str(input.shape[1:])}{format}"
formats = ",".join(([format] * npars) + [data_fmt])
gcount = input.shape[0]
cols = [
Column(
name=unique_parnames[idx],
format=fits_fmt,
bscale=parbscales[idx],
bzero=parbzeros[idx],
)
for idx in range(npars)
]
cols.append(
Column(
name=unique_parnames[-1],
format=fits_fmt,
bscale=bscale,
bzero=bzero,
)
)
coldefs = ColDefs(cols)
self = FITS_rec.__new__(
cls,
np.rec.array(None, formats=formats, names=coldefs.names, shape=gcount),
)
# By default the data field will just be 'DATA', but it may be
# uniquified if 'DATA' is already used by one of the group names
self._data_field = unique_parnames[-1]
self._coldefs = coldefs
self.parnames = parnames
for idx, name in enumerate(unique_parnames[:-1]):
column = coldefs[idx]
# Note: _get_scale_factors is used here and in other cases
# below to determine whether the column has non-default
# scale/zero factors.
# TODO: Find a better way to do this than using this interface
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(name, pardata[idx])
else:
np.rec.recarray.field(self, idx)[:] = pardata[idx]
column = coldefs[self._data_field]
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(self._data_field, input)
else:
np.rec.recarray.field(self, npars)[:] = input
else:
self = FITS_rec.__new__(cls, input)
self.parnames = None
return self
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if isinstance(obj, GroupData):
self.parnames = obj.parnames
elif isinstance(obj, FITS_rec):
self.parnames = obj._coldefs.names
def __getitem__(self, key):
out = super().__getitem__(key)
if isinstance(out, GroupData):
out.parnames = self.parnames
return out
@property
def data(self):
"""
The raw group data represented as a multi-dimensional `numpy.ndarray`
array.
"""
# The last column in the coldefs is the data portion of the group
return self.field(self._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter values.
"""
if _is_int(parname):
result = self.field(parname)
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.field(indx[0])
            # if more than one group parameter has the same name
else:
result = self.field(indx[0]).astype("f8")
for i in indx[1:]:
result += self.field(i)
return result
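# A minimal construction sketch for the class above (the array shapes,
# parameter names, and values are illustrative assumptions):
#
#     import numpy as np
#
#     imdata = np.zeros((10, 1, 1, 2, 5), dtype=np.float32)   # 10 groups
#     p1 = np.ones(10, dtype=np.float32)
#     p2 = np.arange(10, dtype=np.float32)
#     gdata = GroupData(imdata, bitpix=-32, parnames=["POS", "VEL"],
#                       pardata=[p1, p2])
#     ghdu = GroupsHDU(gdata)
#     gdata[0].par("POS")   # -> the POS parameter value of the first group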
class GroupsHDU(PrimaryHDU, _TableLikeHDU):
"""
FITS Random Groups HDU class.
See the :ref:`astropy:random-groups` section in the Astropy documentation
for more details on working with this type of HDU.
"""
_bitpix2tform = {8: "B", 16: "I", 32: "J", 64: "K", -32: "E", -64: "D"}
_data_type = GroupData
_data_field = "DATA"
"""
The name of the table record array field that will contain the group data
for each group; 'DATA' by default, but may be preceded by any number of
underscores if 'DATA' is already a parameter name
"""
def __init__(self, data=None, header=None):
super().__init__(data=data, header=header)
if data is not DELAYED:
self.update_header()
# Update the axes; GROUPS HDUs should always have at least one axis
if len(self._axes) <= 0:
self._axes = [0]
self._header["NAXIS"] = 1
self._header.set("NAXIS1", 0, after="NAXIS")
@classmethod
def match_header(cls, header):
keyword = header.cards[0].keyword
return keyword == "SIMPLE" and "GROUPS" in header and header["GROUPS"] is True
@lazyproperty
def data(self):
"""
The data of a random group FITS file will be like a binary table's
data.
"""
if self._axes == [0]:
return
data = self._get_tbdata()
data._coldefs = self.columns
data.parnames = self.parnames
del self.columns
return data
@lazyproperty
def parnames(self):
"""The names of the group parameters as described by the header."""
pcount = self._header["PCOUNT"]
# The FITS standard doesn't really say what to do if a parname is
# missing, so for now just assume that won't happen
return [self._header["PTYPE" + str(idx + 1)] for idx in range(pcount)]
@lazyproperty
def columns(self):
if self._has_data and hasattr(self.data, "_coldefs"):
return self.data._coldefs
format = self._bitpix2tform[self._header["BITPIX"]]
pcount = self._header["PCOUNT"]
parnames = []
bscales = []
bzeros = []
for idx in range(pcount):
bscales.append(self._header.get("PSCAL" + str(idx + 1), None))
bzeros.append(self._header.get("PZERO" + str(idx + 1), None))
parnames.append(self._header["PTYPE" + str(idx + 1)])
formats = [format] * len(parnames)
dim = [None] * len(parnames)
# Now create columns from collected parameters, but first add the DATA
# column too, to contain the group data.
parnames.append("DATA")
bscales.append(self._header.get("BSCALE"))
        bzeros.append(self._header.get("BZERO"))
data_shape = self.shape[:-1]
formats.append(str(int(np.prod(data_shape))) + format)
dim.append(data_shape)
parnames = _unique_parnames(parnames)
self._data_field = parnames[-1]
cols = [
Column(name=name, format=fmt, bscale=bscale, bzero=bzero, dim=dim)
for name, fmt, bscale, bzero, dim in zip(
parnames, formats, bscales, bzeros, dim
)
]
coldefs = ColDefs(cols)
return coldefs
@property
def _nrows(self):
if not self._data_loaded:
# The number of 'groups' equates to the number of rows in the table
# representation of the data
return self._header.get("GCOUNT", 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
# Only really a lazyproperty for symmetry with _TableBaseHDU
return 0
@property
def is_image(self):
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
size = 0
naxis = self._header.get("NAXIS", 0)
# for random group image, NAXIS1 should be 0, so we skip NAXIS1.
if naxis > 1:
size = 1
for idx in range(1, naxis):
size = size * self._header["NAXIS" + str(idx + 1)]
bitpix = self._header["BITPIX"]
gcount = self._header.get("GCOUNT", 1)
pcount = self._header.get("PCOUNT", 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
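    # A worked example of the size formula above (all header values are
    # hypothetical): BITPIX=-32, GCOUNT=10, PCOUNT=3, NAXIS=3 with NAXIS2=4 and
    # NAXIS3=5 gives 4 * 5 = 20 data elements per group, so the data part is
    # abs(-32) * 10 * (3 + 20) // 8 = 920 bytes.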
def update_header(self):
old_naxis = self._header.get("NAXIS", 0)
if self._data_loaded:
if isinstance(self.data, GroupData):
self._axes = list(self.data.data.shape)[1:]
self._axes.reverse()
self._axes = [0] + self._axes
field0 = self.data.dtype.names[0]
field0_code = self.data.dtype.fields[field0][0].name
elif self.data is None:
self._axes = [0]
field0_code = "uint8" # For lack of a better default
else:
raise ValueError("incorrect array type")
self._header["BITPIX"] = DTYPE2BITPIX[field0_code]
self._header["NAXIS"] = len(self._axes)
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
if idx == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(idx)
self._header.set("NAXIS" + str(idx + 1), axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header["NAXIS" + str(idx)]
except KeyError:
pass
if self._has_data and isinstance(self.data, GroupData):
self._header.set("GROUPS", True, after="NAXIS" + str(len(self._axes)))
self._header.set("PCOUNT", len(self.data.parnames), after="GROUPS")
self._header.set("GCOUNT", len(self.data), after="PCOUNT")
column = self.data._coldefs[self._data_field]
scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
self._header.set("BSCALE", column.bscale)
if zero:
self._header.set("BZERO", column.bzero)
for idx, name in enumerate(self.data.parnames):
self._header.set("PTYPE" + str(idx + 1), name)
column = self.data._coldefs[idx]
scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
self._header.set("PSCAL" + str(idx + 1), column.bscale)
if zero:
self._header.set("PZERO" + str(idx + 1), column.bzero)
# Update the position of the EXTEND keyword if it already exists
if "EXTEND" in self._header:
if len(self._axes):
after = "NAXIS" + str(len(self._axes))
else:
after = "NAXIS"
self._header.set("EXTEND", after=after)
def _writedata_internal(self, fileobj):
"""
Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
we have to get the data's byte order a different way...
TODO: Might be nice to store some indication of the data's byte order
as an attribute or function so that we don't have to do this.
"""
size = 0
if self.data is not None:
self.data._scale_back()
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f">i{self.data.dtype.itemsize}",
)
should_swap = False
else:
output = self.data
fname = self.data.dtype.names[0]
byteorder = self.data.dtype.fields[fname][0].str[0]
should_swap = byteorder in swap_types
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify locations and values of mandatory keywords.
self.req_cards(
"NAXIS", 2, lambda v: (_is_int(v) and 1 <= v <= 999), 1, option, errs
)
self.req_cards("NAXIS1", 3, lambda v: (_is_int(v) and v == 0), 0, option, errs)
after = self._header["NAXIS"] + 3
pos = lambda x: x >= after
self.req_cards("GCOUNT", pos, _is_int, 1, option, errs)
self.req_cards("PCOUNT", pos, _is_int, 0, option, errs)
self.req_cards("GROUPS", pos, lambda v: (v is True), True, option, errs)
return errs
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
# TODO: Maybe check this on a per-field basis instead of assuming
# that all fields have the same byte order?
byteorder = self.data.dtype.fields[self.data.dtype.names[0]][0].str[0]
if byteorder != ">":
if self.data.flags.writeable:
byteswapped = True
d = self.data.byteswap(True)
d.dtype = d.dtype.newbyteorder(">")
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = self.data.byteswap(False)
d.dtype = d.dtype.newbyteorder(">")
byteswapped = False
else:
byteswapped = False
d = self.data
byte_data = d.view(type=np.ndarray, dtype=np.ubyte)
cs = self._compute_checksum(byte_data)
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped:
d.byteswap(True)
d.dtype = d.dtype.newbyteorder("<")
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _summary(self):
summary = super()._summary()
name, ver, classname, length, shape, format, gcount = summary
# Drop the first axis from the shape
if shape:
shape = shape[1:]
if shape and all(shape):
# Update the format
format = self.columns[0].dtype.name
# Update the GCOUNT report
gcount = f"{self._gcount} Groups {self._pcount} Parameters"
return (name, ver, classname, length, shape, format, gcount)
def _par_indices(names):
"""
    Given a list of parameter names, returns a mapping of each name (uppercased,
    so lookups are case-insensitive) to the index or indices at which that name
    was found in the list.
"""
unique = {}
for idx, name in enumerate(names):
# Case insensitive
name = name.upper()
if name in unique:
unique[name].append(idx)
else:
unique[name] = [idx]
return unique
def _unique_parnames(names):
"""
Given a list of parnames, including possible duplicates, returns a new list
of parnames with duplicates prepended by one or more underscores to make
them unique. This is also case insensitive.
"""
upper_names = set()
unique_names = []
for name in names:
name_upper = name.upper()
while name_upper in upper_names:
name = "_" + name
name_upper = "_" + name_upper
unique_names.append(name)
upper_names.add(name_upper)
return unique_names
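# Illustrative behavior of the two helpers above (inputs chosen arbitrarily):
#
#     _par_indices(["SCALE", "scale", "DATA"])
#     # -> {"SCALE": [0, 1], "DATA": [2]}
#
#     _unique_parnames(["DATA", "data", "X"])
#     # -> ["DATA", "_data", "X"]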
|
0196b8f8570cfc4a89f4d8b08be5ebd1175092ab9d3bf381775e7814c892ff14 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from astropy.io.fits.column import (
ATTRIBUTE_TO_KEYWORD,
FITS2NUMPY,
KEYWORD_NAMES,
KEYWORD_TO_ATTRIBUTE,
TDEF_RE,
ColDefs,
Column,
_AsciiColDefs,
_cmp_recformats,
_convert_format,
_FormatP,
_FormatQ,
_makep,
_parse_tformat,
_scalar_to_format,
)
from astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import _is_int, _str_to_num, path_like
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from .base import DELAYED, ExtensionHDU, _ValidHDU
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = " "
lineterminator = "\n"
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
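# A small sketch of what this dialect produces (the buffer is just for
# illustration):
#
#     import csv, io
#     buf = io.StringIO()
#     csv.writer(buf, dialect=FITSTableDumpDialect).writerow(["1", "a b"])
#     buf.getvalue()   # -> '"1" "a b"\n'  (space-delimited, every field quoted)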
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(
cls,
columns,
header=None,
nrows=0,
fill=False,
character_as_bytes=False,
**kwargs,
):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
        `numpy.ndarray` or `numpy.recarray` object), return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs` -like
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
If these columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
            An optional `Header` object with which to instantiate the new HDU. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
            If `True`, will fill all cells with zeros or blanks. If `False`,
            copy the data from the input; undefined cells will still be filled
            with zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(
coldefs, nrows=nrows, fill=fill, character_as_bytes=character_as_bytes
)
hdu = cls(
data=data, header=header, character_as_bytes=character_as_bytes, **kwargs
)
coldefs._add_listener(hdu)
return hdu
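    # A typical call on a concrete subclass (the column names, formats, and
    # values below are illustrative):
    #
    #     import numpy as np
    #     from astropy.io import fits
    #
    #     c1 = fits.Column(name="target", format="10A",
    #                      array=np.array(["NGC1", "NGC2"]))
    #     c2 = fits.Column(name="flux", format="E",
    #                      array=np.array([1.2, 3.4], dtype=np.float32))
    #     hdu = fits.BinTableHDU.from_columns([c1, c2])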
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (
any(type(r) in (_FormatP, _FormatQ) for r in columns._recformats)
and self._data_size is not None
and self._data_size > self._theap
):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8, self._data_offset)
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data = raw_data[:tbsize].view(dtype=columns.dtype, type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype, self._data_offset)
if raw_data is None:
                # This can happen when a brand new table HDU is being created
                # and no data has been assigned to the columns, in which case
                # just return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder(">")
# hack to enable pseudo-uint support
data._uint = self._uint
        # pass the heap data location (datLoc), needed for P format columns
data._heapoffset = self._theap
data._heapsize = self._header["PCOUNT"]
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_load_data(self):
"""Load the data if asked to."""
if not self._data_loaded:
self.data
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(
self,
data=None,
header=None,
name=None,
uint=False,
ver=None,
character_as_bytes=False,
):
super().__init__(data=data, header=header, name=name, ver=ver)
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError("No header to setup HDU.")
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
# construct a list of cards of minimal header
cards = [
("XTENSION", self._extension, self._ext_comment),
("BITPIX", 8, "array data type"),
("NAXIS", 2, "number of array dimensions"),
("NAXIS1", 0, "length of dimension 1"),
("NAXIS2", 0, "length of dimension 2"),
("PCOUNT", 0, "number of group parameters"),
("GCOUNT", 1, "number of groups"),
("TFIELDS", 0, "number of table fields"),
]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
# may get modified. the data is still a "view" (for now)
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group("label")
except Exception:
continue # skip if there is no match
if base_keyword in {
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ", ".join(x + "n" for x in sorted(future_ignore))
warnings.warn(
"The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys),
AstropyDeprecationWarning,
)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header["NAXIS1"] = self.data._raw_itemsize
self._header["NAXIS2"] = self.data.shape[0]
self._header["TFIELDS"] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError("Table data has incorrect type.")
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, "_coldefs"):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if "data" in self.__dict__:
if self.__dict__["data"] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ",".join(self.columns._recformats)
data = np.rec.array(
None, formats=formats, names=self.columns.names, shape=0
)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
# This would be the place, if the input data was for an ASCII
# table and this is binary table, or vice versa, to convert the
# data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
if "data" in self.__dict__:
self.columns._remove_listener(self.__dict__["data"])
self.__dict__["data"] = data
self.columns = self.data.columns
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError("Table data has incorrect type.")
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get("NAXIS2", 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
size = self._header["NAXIS1"] * self._header["NAXIS2"]
return self._header.get("THEAP", size)
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set("NAXIS1", self.data._raw_itemsize, after="NAXIS")
self._header.set("NAXIS2", self.data.shape[0], after="NAXIS1")
self._header.set("TFIELDS", len(self.columns), after="GCOUNT")
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(), header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header["TFIELDS"] = len(self.data._coldefs)
self._header["NAXIS2"] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
heapstart = self._header.get("THEAP", tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header["PCOUNT"] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat, max=_max)
self._header["TFORM" + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option="warn"):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
if len(self._header) > 1:
if not (
isinstance(self._header[0], str)
and self._header[0].rstrip() == self._extension
):
err_text = "The XTENSION keyword must match the HDU type."
fix_text = f"Converted the XTENSION keyword to {self._extension}."
def fix(header=self._header):
header[0] = (self._extension, self._ext_comment)
errs.append(
self.run_option(
option, err_text=err_text, fix_text=fix_text, fix=fix
)
)
self.req_cards("NAXIS", None, lambda v: (v == 2), 2, option, errs)
self.req_cards("BITPIX", None, lambda v: (v == 8), 8, option, errs)
self.req_cards(
"TFIELDS",
7,
lambda v: (_is_int(v) and v >= 0 and v <= 999),
0,
option,
errs,
)
tfields = self._header["TFIELDS"]
for idx in range(tfields):
self.req_cards("TFORM" + str(idx + 1), None, None, None, option, errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
nrows = self._header["NAXIS2"]
ncols = self._header["TFIELDS"]
format = ", ".join(
[self._header["TFORM" + str(j + 1)] for j in range(ncols)]
)
format = f"[{format}]"
dims = f"{nrows}R x {ncols}C"
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(
self, column, col_idx, attr, old_value, new_value
):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(
before_keyword, (keyword, new_value), after=True
)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1 :]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword, (keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
        If ``index`` is given, only clear keywords for that column index
        (renumbering the keywords of any later columns); the index is
        zero-based. Otherwise, clear the keywords for all columns.
"""
# First collect all the table structure related keyword in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group("label")
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
}:
continue
num = int(match.group("num")) - 1 # convert to zero-base
table_keywords.append((idx, match.group(0), base_keyword, num))
# First delete
rev_sorted_idx_0 = sorted(
table_keywords, key=operator.itemgetter(0), reverse=True
)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value, old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if "TFIELDS" in self._header:
self._header["TFIELDS"] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = "TABLE"
_ext_comment = "ASCII table extension"
_padding_byte = " "
_columns_type = _AsciiColDefs
__format_RE = re.compile(r"(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?")
def __init__(
self, data=None, header=None, name=None, ver=None, character_as_bytes=False
):
super().__init__(
data, header, name=name, ver=ver, character_as_bytes=character_as_bytes
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
        names = list(columns.names)
# determine if there are duplicate field names and if there
# are throw an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError(f"Duplicate field names: {dup}")
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = "S" + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header["NAXIS1"] > itemsize:
data_type = "S" + str(
columns.spans[idx] + self._header["NAXIS1"] - itemsize
)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b" ", dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option="warn"):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards("PCOUNT", None, lambda v: (v == 0), 0, option, errs)
tfields = self._header["TFIELDS"]
for idx in range(tfields):
self.req_cards("TBCOL" + str(idx + 1), None, _is_int, None, option, errs)
return errs
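# A minimal ASCII-table construction sketch (the column name, format, and
# values are illustrative; 'F8.3' is an ASCII-table-only format, so there is
# no binary/ASCII ambiguity):
#
#     import numpy as np
#     from astropy.io import fits
#
#     col = fits.Column(name="mag", format="F8.3",
#                       array=np.array([12.345, 6.789]))
#     ascii_hdu = fits.TableHDU.from_columns([col])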
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = "BINTABLE"
_ext_comment = "binary table extension"
def __init__(
self,
data=None,
header=None,
name=None,
uint=False,
ver=None,
character_as_bytes=False,
):
from astropy.table import Table
if isinstance(data, Table):
from astropy.io.fits.convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(
data,
header,
name=name,
uint=uint,
ver=ver,
character_as_bytes=character_as_bytes,
)
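    # Since __init__ accepts an astropy Table, a common construction path is
    # (the table contents are illustrative):
    #
    #     from astropy.table import Table
    #
    #     t = Table({"a": [1, 2], "b": [3.0, 4.0]})
    #     hdu = BinTableHDU(t, name="MYTABLE")   # converted via table_to_hdu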
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension in (cls._extension, "A3DTABLE")
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data.
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
            # Now add in the heap data to the checksum (we can skip any gap
            # between the table and the heap since it's all zeros and doesn't
            # contribute to the checksum)
if data._get_raw_data() is None:
                # This block is still needed because
                # test_variable_length_table_data leads to ._get_raw_data
                # returning None, which means _get_heap_data doesn't work.
                # That happens when the data is loaded in memory rather than
                # being backed by the file on disk.
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
else:
csum = self._compute_checksum(data._get_heap_data(), csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
            # Write out the heap of variable length array columns; this has
            # to be done after the "regular" data is written (above).
            # To avoid a bug in the Lustre filesystem client, don't
            # write 0-byte objects.
if data._gap > 0:
fileobj.write((data._gap * "\0").encode("ascii"))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
# that the data pointers appear in the column (regardless
# if that data pointer has a different, previous heap
# offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx], _FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx) for idx in range(len(self.data.columns))]
        # Creating Record objects is expensive (as in
        # `for row in self.data:`), so instead we just iterate over the row
        # indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == "U":
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i + 1 :])
item = np.char.encode(item, "ascii")
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j + 1 :])
# Fix padding problem (see #5296).
padding = "\x00" * (field_width - item_length)
fileobj.write(padding.encode("ascii"))
_tdump_file_format = textwrap.dedent(
"""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
followed by a blank. Floating point data is output right justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
              This format does *not* support variable length arrays that use
              the 'Q' format, due to ambiguities that are difficult to
              overcome. In practice this means the file format cannot support
              VLA columns in tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
definitions for one column in the table. The line is broken up into
8, sixteen-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
"""
)
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : path-like or file-like, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`, no
column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
"""
if isinstance(datafile, path_like):
datafile = os.path.expanduser(datafile)
if isinstance(cdfile, path_like):
cdfile = os.path.expanduser(cdfile)
if isinstance(hfile, path_like):
hfile = os.path.expanduser(hfile)
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, path_like):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(
" ".join([f"File '{f}' already exists." for f in exist])
+ " If you mean to "
"replace the file(s) "
"then use the argument "
"'overwrite=True'."
)
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep="\n", endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace("\n", "\n ")
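    # A dump() usage sketch (the file names are assumptions; any of the three
    # outputs may be omitted):
    #
    #     hdu.dump(datafile="table_data.txt", cdfile="table_cols.txt",
    #              hfile="table_header.txt", overwrite=True)
    #
    # datafile gets one line per table row, cdfile one line per column
    # definition, and hfile the header cards, in the formats described above.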
def load(cls, datafile, cdfile=None, hfile=None, replace=False, header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
When absent the column definitions and/or header parameters are taken
from the header object given in the header argument; otherwise sensible
defaults are inferred (though this mode is not recommended).
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this objects header.
replace : bool, optional
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : `~astropy.io.fits.Header`, optional
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
supersedes the keywords from hfile, which is only used to update
            values not present in this Header, unless ``replace=True``, in
            which case this Header's values are completely replaced with the
            values from hfile.
Notes
-----
The primary use for the `load` method is to allow the input of ASCII
data that was edited in a standard text editor of the table data and
parameters. The `dump` method can be used to create the initial ASCII
files.
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(
Header.fromtextfile(hfile), update=True, update_first=True
)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace("\n", "\n ")
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
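    # A matching load() sketch, reading back the files written by dump() above
    # (same assumed file names):
    #
    #     new_hdu = BinTableHDU.load("table_data.txt", cdfile="table_cols.txt",
    #                                hfile="table_header.txt")
    #
    # With no cdfile/hfile the column definitions are inferred from the data
    # file and the optional ``header`` argument supplies the header.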
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + ".txt"
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "w")
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == "S":
itemsize = int(format[1:])
return "{:{size}}".format(val, size=itemsize)
elif format in np.typecodes["AllInteger"]:
# output integer
return f"{val:21d}"
elif format in np.typecodes["Complex"]:
return f"{val.real:21.15g}+{val.imag:.15g}j"
elif format in np.typecodes["Float"]:
# output floating point
return f"{val:#21.15g}"
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append("VLA_Length=")
line.append(f"{len(row[column.name]):21d}")
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == "V":
array_format = dtype.base.char
if array_format == "S":
array_format += str(dtype.itemsize)
if dtype.char == "V":
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name], array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "w")
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ["disp", "unit", "dim", "null", "bscale", "bzero"]
line += [
"{!s:16s}".format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)
]
fileobj.write(" ".join(line))
fileobj.write("\n")
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
fileobj = open(fileobj)
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == "VLA_Length=":
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(
np.recarray(shape=1, dtype=dtype), nrows=nrows, fill=True
)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)) :]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY["L"]:
return bool(int(val))
elif recformats[col] == FITS2NUMPY["M"]:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == "VLA_Length=":
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
data[row][col][:] = line[idx : idx + vla_len]
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [
format_value(col, val) for val in line[slice_]
]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
fileobj = open(fileobj)
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ["name", "format", "disp", "unit", "dim"]:
kwargs[key] = words.pop(0).replace('""', "")
for key in ["null", "bscale", "bzero"]:
word = words.pop(0).replace('""', "")
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
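# --- Usage sketch (illustrative, not part of the original module) ---------
# The private _dump_*/_load_* helpers above back the public
# BinTableHDU.dump()/BinTableHDU.load() round trip.  File names below are
# placeholders.
def _example_dump_load_roundtrip():
    from astropy.io import fits

    hdu = fits.BinTableHDU.from_columns(
        [fits.Column(name="flux", format="E", array=[1.0, 2.5, 3.0])]
    )
    # Dump the data, column definitions, and header to three ASCII files...
    hdu.dump(datafile="table_data.txt", cdfile="table_cols.txt",
             hfile="table_header.txt", overwrite=True)
    # ...and reconstruct an equivalent BinTableHDU from them.
    return fits.BinTableHDU.load("table_data.txt", cdfile="table_cols.txt",
                                 hfile="table_header.txt")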
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
# the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (
not isinstance(c, chararray.chararray)
and c.itemsize > 1
and c.dtype.str[0] in swap_types
):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({"names": names, "formats": formats, "offsets": offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
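# --- Usage sketch (illustrative, not part of the original module) ---------
# How the context manager above is intended to be used when serializing a
# binary table: the data is byteswapped to big-endian for the duration of
# the write and restored afterwards.  `fileobj` is assumed to expose a
# writearray() method like astropy's internal _File objects.
def _example_byte_swap_write(bintable_hdu, fileobj):
    with _binary_table_byte_swap(bintable_hdu.data) as swapped:
        # Inside the block every numeric column is big-endian, as required
        # by the FITS standard; on exit the original byte order is restored.
        fileobj.writearray(swapped)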
|
32c169c5b61bf6b7e7f052294247b0f5d42a4e8f8ad9ad44e6ef6ed3eec807a4 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
from astropy.io.fits.file import _File
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import fileobj_name
from astropy.utils import lazyproperty
from .base import NonstandardExtHDU
from .hdulist import HDUList
class FitsHDU(NonstandardExtHDU):
"""
A non-standard extension HDU for encapsulating entire FITS files within a
single HDU of a container FITS file. These HDUs have an extension (that is
an XTENSION keyword) of FITS.
The FITS file contained in the HDU's data can be accessed by the `hdulist`
attribute which returns the contained FITS file as an `HDUList` object.
"""
_extension = "FITS"
@lazyproperty
def hdulist(self):
self._file.seek(self._data_offset)
fileobj = io.BytesIO()
# Read the data into a BytesIO--reading directly from the file
# won't work (at least for gzipped files) due to problems deep
# within the gzip module that make it difficult to read gzip files
# embedded in another file
fileobj.write(self._file.read(self.size))
fileobj.seek(0)
if self._header["COMPRESS"]:
fileobj = gzip.GzipFile(fileobj=fileobj)
return HDUList.fromfile(fileobj, mode="readonly")
@classmethod
def fromfile(cls, filename, compress=False):
"""
Like `FitsHDU.fromhdulist()`, but creates a FitsHDU from a file on
disk.
Parameters
----------
filename : str
The path to the file to read into a FitsHDU
compress : bool, optional
Gzip compress the FITS file
"""
with HDUList.fromfile(filename) as hdulist:
return cls.fromhdulist(hdulist, compress=compress)
@classmethod
def fromhdulist(cls, hdulist, compress=False):
"""
Creates a new FitsHDU from a given HDUList object.
Parameters
----------
hdulist : HDUList
A valid HDUList object.
compress : bool, optional
Gzip compress the FITS file
"""
fileobj = bs = io.BytesIO()
if compress:
if hasattr(hdulist, "_file"):
name = fileobj_name(hdulist._file)
else:
name = None
fileobj = gzip.GzipFile(name, mode="wb", fileobj=bs)
hdulist.writeto(fileobj)
if compress:
fileobj.close()
# A proper HDUList should still be padded out to a multiple of 2880
# technically speaking
padding = (_pad_length(bs.tell()) * cls._padding_byte).encode("ascii")
bs.write(padding)
bs.seek(0)
cards = [
("XTENSION", cls._extension, "FITS extension"),
("BITPIX", 8, "array data type"),
("NAXIS", 1, "number of array dimensions"),
("NAXIS1", len(bs.getvalue()), "Axis length"),
("PCOUNT", 0, "number of parameters"),
("GCOUNT", 1, "number of groups"),
]
# Add the XINDn keywords proposed by Perry, though nothing is done with
# these at the moment
if len(hdulist) > 1:
for idx, hdu in enumerate(hdulist[1:]):
cards.append(
(
"XIND" + str(idx + 1),
hdu._header_offset,
f"byte offset of extension {idx + 1}",
)
)
cards.append(("COMPRESS", compress, "Uses gzip compression"))
header = Header(cards)
return cls._readfrom_internal(_File(bs), header=header)
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != "XTENSION":
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return xtension == cls._extension
# TODO: Add header verification
def _summary(self):
# TODO: Perhaps make this more descriptive...
return (self.name, self.ver, self.__class__.__name__, len(self._header))
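# --- Usage sketch (illustrative, not part of the original module) ---------
# Wrap an in-memory HDUList inside a single FitsHDU and read it back through
# the lazy `hdulist` attribute (compress=False keeps the sketch minimal).
def _example_wrap_fits_in_fits():
    import numpy as np
    from astropy.io import fits

    inner = fits.HDUList([fits.PrimaryHDU(data=np.arange(10))])
    wrapper = fits.FitsHDU.fromhdulist(inner, compress=False)
    container = fits.HDUList([fits.PrimaryHDU(), wrapper])
    # The embedded FITS file is recovered lazily from the wrapper HDU.
    return container, wrapper.hdulist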
|
60261a692853654c4832843f033e4bfe00938fba4e2cad5c1c1f638dc4dceebc | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import mmap
import sys
import warnings
import numpy as np
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
_is_dask_array,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
)
from astropy.io.fits.verify import VerifyWarning
from astropy.utils import isiterable, lazyproperty
from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU, _ValidHDU
__all__ = ["Section", "PrimaryHDU", "ImageHDU"]
class _ImageBaseHDU(_ValidHDU):
"""FITS image HDU base class.
Attributes
----------
header
image header
data
image data
"""
standard_keyword_comments = {
"SIMPLE": "conforms to FITS standard",
"XTENSION": "Image extension",
"BITPIX": "array data type",
"NAXIS": "number of array dimensions",
"GROUPS": "has groups",
"PCOUNT": "number of parameters",
"GCOUNT": "number of groups",
}
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
uint=True,
scale_back=False,
ignore_blank=False,
**kwargs,
):
from .groups import GroupsHDU
super().__init__(data=data, header=header)
if data is DELAYED:
# Presumably if data is DELAYED then this HDU is coming from an
# open file, and was not created in memory
if header is None:
# this should never happen
raise ValueError("No header to setup HDU.")
else:
# TODO: Some of this card manipulation should go into the
# PrimaryHDU and GroupsHDU subclasses
# construct a list of cards of minimal header
if isinstance(self, ExtensionHDU):
c0 = ("XTENSION", "IMAGE", self.standard_keyword_comments["XTENSION"])
else:
c0 = ("SIMPLE", True, self.standard_keyword_comments["SIMPLE"])
cards = [
c0,
("BITPIX", 8, self.standard_keyword_comments["BITPIX"]),
("NAXIS", 0, self.standard_keyword_comments["NAXIS"]),
]
if isinstance(self, GroupsHDU):
cards.append(("GROUPS", True, self.standard_keyword_comments["GROUPS"]))
if isinstance(self, (ExtensionHDU, GroupsHDU)):
cards.append(("PCOUNT", 0, self.standard_keyword_comments["PCOUNT"]))
cards.append(("GCOUNT", 1, self.standard_keyword_comments["GCOUNT"]))
if header is not None:
orig = header.copy()
header = Header(cards)
header.extend(orig, strip=True, update=True, end=True)
else:
header = Header(cards)
self._header = header
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
# Keep track of whether BZERO/BSCALE were set from the header so that
# values for self._orig_bzero and self._orig_bscale can be set
# properly, if necessary, once the data has been set.
bzero_in_header = "BZERO" in self._header
bscale_in_header = "BSCALE" in self._header
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
# Save off other important values from the header needed to interpret
# the image data
self._axes = [
self._header.get("NAXIS" + str(axis + 1), 0)
for axis in range(self._header.get("NAXIS", 0))
]
# Not supplying a default for BITPIX makes sense because BITPIX
# is either in the header or should be determined from the dtype of
# the data (which occurs when the data is set).
self._bitpix = self._header.get("BITPIX")
self._gcount = self._header.get("GCOUNT", 1)
self._pcount = self._header.get("PCOUNT", 0)
self._blank = None if ignore_blank else self._header.get("BLANK")
self._verify_blank()
self._orig_bitpix = self._bitpix
self._orig_blank = self._header.get("BLANK")
# These get set again below, but need to be set to sensible defaults
# here.
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
# Set the name attribute if it was provided (if this is an ImageHDU
# this will result in setting the EXTNAME keyword of the header as
# well)
if "name" in kwargs and kwargs["name"]:
self.name = kwargs["name"]
if "ver" in kwargs and kwargs["ver"]:
self.ver = kwargs["ver"]
# Set to True if the data or header is replaced, indicating that
# update_header should be called
self._modified = False
if data is DELAYED:
if not do_not_scale_image_data and (self._bscale != 1 or self._bzero != 0):
# This indicates that when the data is accessed or written out
# to a new file it will need to be rescaled
self._data_needs_rescale = True
return
else:
# Setting data will update the header and set _bitpix, _bzero,
# and _bscale to the appropriate BITPIX for the data, and always
# sets _bzero=0 and _bscale=1.
self.data = data
# Check again for BITPIX/BSCALE/BZERO in case they changed when the
# data was assigned. This can happen, for example, if the input
# data is an unsigned int numpy array.
self._bitpix = self._header.get("BITPIX")
# Do not provide default values for BZERO and BSCALE here because
# the keywords will have been deleted in the header if appropriate
# after scaling. We do not want to put them back in if they
# should not be there.
self._bzero = self._header.get("BZERO")
self._bscale = self._header.get("BSCALE")
# Handle case where there was no BZERO/BSCALE in the initial header
# but there should be a BSCALE/BZERO now that the data has been set.
if not bzero_in_header:
self._orig_bzero = self._bzero
if not bscale_in_header:
self._orig_bscale = self._bscale
@classmethod
def match_header(cls, header):
"""
_ImageBaseHDU is sort of an abstract class for HDUs containing image
data (as opposed to table data) and should never be used directly.
"""
raise NotImplementedError
@property
def is_image(self):
return True
@property
def section(self):
"""
Access a section of the image array without loading the entire array
into memory. The :class:`Section` object returned by this attribute is
not meant to be used directly by itself. Rather, slices of the section
return the appropriate slice of the data, and load *only* that section
into memory.
Sections are useful for retrieving a small subset of data from a remote
file that has been opened with the ``use_fsspec=True`` parameter.
For example, you can use this feature to download a small cutout from
a large FITS image hosted in the Amazon S3 cloud (see the
:ref:`astropy:fits-cloud-files` section of the Astropy
documentation for more details.)
For local files, sections are mostly obsoleted by memmap support, but
should still be used to deal with very large scaled images.
Note that sections cannot currently be written to. Moreover, any
in-memory updates to the image's ``.data`` property may not be
reflected in the slices obtained via ``.section``. See the
:ref:`astropy:data-sections` section of the documentation for
more details.
"""
return Section(self)
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@property
def header(self):
return self._header
@header.setter
def header(self, header):
self._header = header
self._modified = True
self.update_header()
@lazyproperty
def data(self):
"""
Image/array data as a `~numpy.ndarray`.
Please remember that the order of axes on a Numpy array is the opposite
of the order specified in the FITS file. For example, for a 2D image
the "rows" or y-axis are the first dimension, and the "columns" or
x-axis are the second dimension.
If the data is scaled using the BZERO and BSCALE parameters, this
attribute returns the data scaled to its physical values unless the
file was opened with ``do_not_scale_image_data=True``.
"""
if len(self._axes) < 1:
return
data = self._get_scaled_image_data(self._data_offset, self.shape)
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if "data" in self.__dict__ and self.__dict__["data"] is not None:
if self.__dict__["data"] is data:
return
else:
self._data_replaced = True
was_unsigned = _is_pseudo_integer(self.__dict__["data"].dtype)
else:
self._data_replaced = True
was_unsigned = False
if (
data is not None
and not isinstance(data, np.ndarray)
and not _is_dask_array(data)
):
# Try to coerce the data into a numpy array--this will work, on
# some level, for most objects
try:
data = np.array(data)
except Exception:
raise TypeError(
f"data object {data!r} could not be coerced into an ndarray"
)
if data.shape == ():
raise TypeError(
f"data object {data!r} should have at least one dimension"
)
self.__dict__["data"] = data
self._modified = True
if self.data is None:
self._axes = []
else:
# Set new values of bitpix, bzero, and bscale now, but wait to
# revise original values until header is updated.
self._bitpix = DTYPE2BITPIX[data.dtype.name]
self._bscale = 1
self._bzero = 0
self._blank = None
self._axes = list(data.shape)
self._axes.reverse()
# Update the header, including adding BZERO/BSCALE if new data is
# unsigned. Does not change the values of self._bitpix,
# self._orig_bitpix, etc.
self.update_header()
if data is not None and was_unsigned:
self._update_header_scale_info(data.dtype)
# Keep _orig_bitpix as it was until header update is done, then
# set it, to allow easier handling of the case of unsigned
# integer data being converted to something else. Setting these here
# is needed only for the case do_not_scale_image_data=True when
# setting the data to unsigned int.
# If necessary during initialization, i.e. if BSCALE and BZERO were
# not in the header but the data was unsigned, the attributes below
# will be updated in __init__.
self._orig_bitpix = self._bitpix
self._orig_bscale = self._bscale
self._orig_bzero = self._bzero
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
def update_header(self):
"""
Update the header keywords to agree with the data.
"""
if not (
self._modified
or self._header._modified
or (self._has_data and self.shape != self.data.shape)
):
# Not likely that anything needs updating
return
old_naxis = self._header.get("NAXIS", 0)
if "BITPIX" not in self._header:
bitpix_comment = self.standard_keyword_comments["BITPIX"]
else:
bitpix_comment = self._header.comments["BITPIX"]
# Update the BITPIX keyword and ensure it's in the correct
# location in the header
self._header.set("BITPIX", self._bitpix, bitpix_comment, after=0)
# If the data's shape has changed (this may have happened without our
# noticing either via a direct update to the data.shape attribute) we
# need to update the internal self._axes
if self._has_data and self.shape != self.data.shape:
self._axes = list(self.data.shape)
self._axes.reverse()
# Update the NAXIS keyword and ensure it's in the correct location in
# the header
if "NAXIS" in self._header:
naxis_comment = self._header.comments["NAXIS"]
else:
naxis_comment = self.standard_keyword_comments["NAXIS"]
self._header.set("NAXIS", len(self._axes), naxis_comment, after="BITPIX")
# TODO: This routine is repeated in several different classes--it
# should probably be made available as a method on all standard HDU
# types
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
naxisn = "NAXIS" + str(idx + 1)
if naxisn in self._header:
self._header[naxisn] = axis
else:
if idx == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(idx)
self._header.set(naxisn, axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header["NAXIS" + str(idx)]
except KeyError:
pass
if "BLANK" in self._header:
self._blank = self._header["BLANK"]
# Add BSCALE/BZERO to header if data is unsigned int.
self._update_pseudo_int_scale_keywords()
self._modified = False
def _update_header_scale_info(self, dtype=None):
"""
Delete BSCALE/BZERO from header if necessary.
"""
# Note that _dtype_for_bitpix determines the dtype based on the
# "original" values of bitpix, bscale, and bzero, stored in
# self._orig_bitpix, etc. It contains the logic for determining which
# special cases of BZERO/BSCALE, if any, are auto-detected as following
# the FITS unsigned int convention.
# Added original_was_unsigned with the intent of facilitating the
# special case of do_not_scale_image_data=True and uint=True
# eventually.
# FIXME: unused, maybe it should be useful?
# if self._dtype_for_bitpix() is not None:
# original_was_unsigned = self._dtype_for_bitpix().kind == 'u'
# else:
# original_was_unsigned = False
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1
):
return
if dtype is None:
dtype = self._dtype_for_bitpix()
if (
dtype is not None
and dtype.kind == "u"
and (self._scale_back or self._scale_back is None)
):
# Data is pseudo-unsigned integers, and the scale_back option
# was not explicitly set to False, so preserve all the scale
# factors
return
for keyword in ["BSCALE", "BZERO"]:
try:
del self._header[keyword]
# Since _update_header_scale_info can, currently, be called
# *after* _prewriteto(), replace these with blank cards so
# the header size doesn't change
self._header.append()
except KeyError:
pass
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self._header["BITPIX"] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self._header["BITPIX"]
self._blank = self._header.pop("BLANK", None)
def scale(self, type=None, option="old", bscale=None, bzero=None):
"""
Scale image data by using ``BSCALE``/``BZERO``.
Call to this method will scale `data` and update the keywords of
``BSCALE`` and ``BZERO`` in the HDU's header. This method should only
be used right before writing to the output file, as the data will be
scaled and is therefore not very usable after the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy
dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'``
etc.). If is `None`, use the current data type.
option : str, optional
How to scale the data: ``"old"`` uses the original ``BSCALE`` and
``BZERO`` values from when the data was read/created (defaulting to
1 and 0 if they don't exist). For integer data only, ``"minmax"``
uses the minimum and maximum of the data to scale. User-specified
``bscale``/``bzero`` values always take precedence.
bscale, bzero : int, optional
User-specified ``BSCALE`` and ``BZERO`` values
"""
# Disable blank support for now
self._scale_internal(
type=type, option=option, bscale=bscale, bzero=bzero, blank=None
)
def _scale_internal(
self, type=None, option="old", bscale=None, bzero=None, blank=0
):
"""
This is an internal implementation of the `scale` method, which
also supports handling BLANK properly.
TODO: This is only needed for fixing #3865 without introducing any
public API changes. We should support BLANK better when rescaling
data, and when that is added the need for this internal interface
should go away.
Note: the default of ``blank=0`` merely reflects the current behavior,
and is not necessarily a deliberate choice (better would be to disallow
conversion of floats to ints without specifying a BLANK if there are
NaN/inf values).
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale is not None and bzero is not None:
_scale = bscale
_zero = bzero
elif bscale is not None:
_scale = bscale
_zero = 0
elif bzero is not None:
_scale = 1
_zero = bzero
elif (
option == "old"
and self._orig_bscale is not None
and self._orig_bzero is not None
):
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == "minmax" and not issubclass(_type, np.floating):
if _is_dask_array(self.data):
min = self.data.min().compute()
max = self.data.max().compute()
else:
min = np.minimum.reduce(self.data.flat)
max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = min
_scale = (max - min) / (2.0**8 - 1)
else:
_zero = (max + min) / 2.0
# throw away -2^N
nbytes = 8 * _type().itemsize
_scale = (max - min) / (2.0**nbytes - 2)
else:
_scale = 1
_zero = 0
# Do the scaling
if _zero != 0:
if _is_dask_array(self.data):
self.data = self.data - _zero
else:
# 0.9.6.3 to avoid out of range error for BZERO = +32768
# We have to explicitly cast _zero to prevent numpy from raising an
# error when doing self.data -= zero, and we do this instead of
# self.data = self.data - zero to avoid doubling memory usage.
np.add(self.data, -_zero, out=self.data, casting="unsafe")
self._header["BZERO"] = _zero
else:
try:
del self._header["BZERO"]
except KeyError:
pass
if _scale and _scale != 1:
self.data = self.data / _scale
self._header["BSCALE"] = _scale
else:
try:
del self._header["BSCALE"]
except KeyError:
pass
# Set blanks
if blank is not None and issubclass(_type, np.integer):
# TODO: Perhaps check that the requested BLANK value fits in the
# integer type being scaled to?
self.data[np.isnan(self.data)] = blank
self._header["BLANK"] = blank
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type)
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
self._blank = blank
self._header["BITPIX"] = self._bitpix
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_blank = self._blank
def _verify(self, option="warn"):
# update_header can fix some things that would otherwise cause
# verification to fail, so do that now...
self.update_header()
self._verify_blank()
return super()._verify(option)
def _verify_blank(self):
# Probably not the best place for this (it should probably happen
# in _verify as well) but I want to be able to raise this warning
# both when the HDU is created and when written
if self._blank is None:
return
messages = []
# TODO: Once the FITSSchema framework is merged these warnings
# should be handled by the schema
if not _is_int(self._blank):
messages.append(
"Invalid value for 'BLANK' keyword in header: {!r} "
"The 'BLANK' keyword must be an integer. It will be "
"ignored in the meantime.".format(self._blank)
)
self._blank = None
if not self._bitpix > 0:
messages.append(
"Invalid 'BLANK' keyword in header. The 'BLANK' keyword "
"is only applicable to integer data, and will be ignored "
"in this HDU."
)
self._blank = None
for msg in messages:
warnings.warn(msg, VerifyWarning)
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self._scale_internal(
BITPIX2DTYPE[self._orig_bitpix], blank=self._orig_blank
)
self.update_header()
if not inplace and self._data_needs_rescale:
# Go ahead and load the scaled image data and update the header
# with the correct post-rescaling headers
_ = self.data
return super()._prewriteto(checksum, inplace)
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
elif _is_dask_array(self.data):
return self._writeinternal_dask(fileobj)
else:
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f">i{self.data.dtype.itemsize}",
)
should_swap = False
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = byteorder in swap_types
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _writeinternal_dask(self, fileobj):
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
raise NotImplementedError("This dtype isn't currently supported with dask.")
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = byteorder in swap_types
if should_swap:
from dask.utils import M
# NOTE: the inplace flag to byteswap needs to be False otherwise the array is
# byteswapped in place every time it is computed and this affects
# the input dask array.
output = output.map_blocks(M.byteswap, False).map_blocks(
M.newbyteorder, "S"
)
initial_position = fileobj.tell()
n_bytes = output.nbytes
# Extend the file n_bytes into the future
fileobj.seek(initial_position + n_bytes - 1)
fileobj.write(b"\0")
fileobj.flush()
if fileobj.fileobj_mode not in ("rb+", "wb+", "ab+"):
# Use another file handle if the current one is not in
# read/write mode
fp = open(fileobj.name, mode="rb+")
should_close = True
else:
fp = fileobj._file
should_close = False
try:
outmmap = mmap.mmap(
fp.fileno(), length=initial_position + n_bytes, access=mmap.ACCESS_WRITE
)
outarr = np.ndarray(
shape=output.shape,
dtype=output.dtype,
offset=initial_position,
buffer=outmmap,
)
output.store(outarr, lock=True, compute=True)
finally:
if should_close:
fp.close()
outmmap.close()
# On Windows closing the memmap causes the file pointer to return to 0, so
# we need to go back to the end of the data (since padding may be written
# after)
fileobj.seek(initial_position + n_bytes)
return n_bytes
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
if bitpix == 8 and self._orig_bzero == -128:
return np.dtype("int8")
for bits, dtype in (
(16, np.dtype("uint16")),
(32, np.dtype("uint32")),
(64, np.dtype("uint64")),
):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype("float64")
elif bitpix > 0: # scale integers to Float32
return np.dtype("float32")
def _convert_pseudo_integer(self, data):
"""
Handle "pseudo-unsigned" integers, if the user requested it. Returns
the converted data array if so; otherwise returns None.
In this case, we don't need to handle BLANK to convert it to NaN,
since we can't do NaNs with integers, anyway, i.e. the user is
responsible for managing blanks.
"""
dtype = self._dtype_for_bitpix()
# bool(dtype) is always False--have to explicitly compare to None; this
# caused a fair amount of hair loss
if dtype is not None and dtype.kind == "u":
# Convert the input raw data into an unsigned integer array and
# then scale the data adjusting for the value of BZERO. Note that
# we subtract the value of BZERO instead of adding because of the
# way numpy converts the raw signed array into an unsigned array.
bits = dtype.itemsize * 8
data = np.array(data, dtype=dtype)
data -= np.uint64(1 << (bits - 1))
return data
def _get_scaled_image_data(self, offset, shape):
"""
Internal function for reading image data from a file and applying scale
factors to it. Normally this is used for the entire image, but it
supports alternate offset/shape for Section support.
"""
code = BITPIX2DTYPE[self._orig_bitpix]
raw_data = self._get_raw_data(shape, code, offset)
raw_data.dtype = raw_data.dtype.newbyteorder(">")
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1 and self._blank is None
):
# No further conversion of the data is necessary
return raw_data
try:
if self._file.strict_memmap:
raise ValueError(
"Cannot load a memory-mapped image: "
"BZERO/BSCALE/BLANK header keywords present. "
"Set memmap=False."
)
except AttributeError: # strict_memmap not set
pass
data = None
if not (self._orig_bzero == 0 and self._orig_bscale == 1):
data = self._convert_pseudo_integer(raw_data)
if data is None:
# In these cases, we end up with floating-point arrays and have to
# apply bscale and bzero. We may have to handle BLANK and convert
# to NaN in the resulting floating-point arrays.
# The BLANK keyword should only be applied for integer data (this
# is checked in __init__ but it can't hurt to double check here)
blanks = None
if self._blank is not None and self._bitpix > 0:
blanks = raw_data.flat == self._blank
# The size of blanks in bytes is the number of elements in
# raw_data.flat. However, if we use np.where instead we will
# only use 8 bytes for each index where the condition is true.
# So if the number of blank items is fewer than
# len(raw_data.flat) / 8, using np.where will use less memory
if blanks.sum() < len(blanks) / 8:
blanks = np.where(blanks)
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
data = np.array(raw_data, dtype=new_dtype)
else: # floating point cases
if self._file is not None and self._file.memmap:
data = raw_data.copy()
elif not raw_data.flags.writeable:
# create a writeable copy if needed
data = raw_data.copy()
# if not memmap, use the space already in memory
else:
data = raw_data
del raw_data
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
data += self._orig_bzero
if self._blank:
data.flat[blanks] = np.nan
return data
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
format = ""
else:
format = self.data.dtype.name
format = format[format.rfind(".") + 1 :]
else:
if self.shape and all(self.shape):
# Only show the format if all the dimensions are non-zero
# if data is not touched yet, use header info.
format = BITPIX2DTYPE[self._bitpix]
else:
format = ""
if (
format
and not self._do_not_scale_image_data
and (self._orig_bscale != 1 or self._orig_bzero != 0)
):
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
format += f" (rescales to {new_dtype.name})"
# Display shape in FITS-order
shape = tuple(reversed(self.shape))
return (self.name, self.ver, class_name, len(self._header), shape, format, "")
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
d = self.data
# First handle the special case where the data is unsigned integer
# 16, 32 or 64
if _is_pseudo_integer(self.data.dtype):
d = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f"i{self.data.dtype.itemsize}",
)
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
if d.dtype.str[0] != ">":
if d.flags.writeable:
byteswapped = True
d = d.byteswap(True)
d.dtype = d.dtype.newbyteorder(">")
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = d.byteswap(False)
d.dtype = d.dtype.newbyteorder(">")
byteswapped = False
else:
byteswapped = False
cs = self._compute_checksum(d.flatten().view(np.uint8))
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped and not _is_pseudo_integer(self.data.dtype):
d.byteswap(True)
d.dtype = d.dtype.newbyteorder("<")
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
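# --- Usage sketch (illustrative, not part of the original module) ---------
# Rescale floating point image data to int16 with BSCALE/BZERO right before
# writing, per the scale() docstring above.  File names are placeholders.
def _example_scale_before_write(path_in="float_image.fits",
                                path_out="int16_image.fits"):
    from astropy.io import fits

    with fits.open(path_in) as hdul:
        hdu = hdul[0]
        # "minmax" derives BSCALE/BZERO from the data range; explicit
        # bscale=/bzero= arguments would take precedence over the option.
        hdu.scale("int16", option="minmax")
        hdu.writeto(path_out, overwrite=True)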
class Section:
"""
Class enabling subsets of ImageHDU data to be loaded lazily via slicing.
Slices of this object load the corresponding section of an image array from
the underlying FITS file, and apply any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
"""
def __init__(self, hdu):
self.hdu = hdu
@property
def shape(self):
# Implementing `.shape` enables `astropy.nddata.Cutout2D` to accept
# `ImageHDU.section` in place of `.data`.
return self.hdu.shape
def __getitem__(self, key):
"""Returns a slice of HDU data specified by `key`.
If the image HDU is backed by a file handle, this method will only read
the chunks of the file needed to extract `key`, which is useful in
situations where the file is located on a slow or remote file system
(e.g., cloud storage).
"""
if not isinstance(key, tuple):
key = (key,)
naxis = len(self.hdu.shape)
return_scalar = (
all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis
)
if not any(k is Ellipsis for k in key):
# We can always add a ... at the end, after making note of whether
# to return a scalar.
key += (Ellipsis,)
ellipsis_count = len([k for k in key if k is Ellipsis])
if len(key) - ellipsis_count > naxis or ellipsis_count > 1:
raise IndexError("too many indices for array")
# Insert extra dimensions as needed.
idx = next(i for i, k in enumerate(key + (Ellipsis,)) if k is Ellipsis)
key = key[:idx] + (slice(None),) * (naxis - len(key) + 1) + key[idx + 1 :]
return_0dim = (
all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis
)
dims = []
offset = 0
# Find all leading axes for which a single point is used.
for idx in range(naxis):
axis = self.hdu.shape[idx]
indx = _IndexInfo(key[idx], axis)
offset = offset * axis + indx.offset
if not _is_int(key[idx]):
dims.append(indx.npts)
break
is_contiguous = indx.contiguous
for jdx in range(idx + 1, naxis):
axis = self.hdu.shape[jdx]
indx = _IndexInfo(key[jdx], axis)
dims.append(indx.npts)
if indx.npts == axis and indx.contiguous:
# The offset needs to multiply the length of all remaining axes
offset *= axis
else:
is_contiguous = False
if is_contiguous:
dims = tuple(dims) or (1,)
bitpix = self.hdu._orig_bitpix
offset = self.hdu._data_offset + offset * abs(bitpix) // 8
# Note: the actual file read operations are delegated to
# `util._array_from_file` via `ImageHDU._get_scaled_image_data`
data = self.hdu._get_scaled_image_data(offset, dims)
else:
data = self._getdata(key)
if return_scalar:
data = data.item()
elif return_0dim:
data = data.squeeze()
return data
def _getdata(self, keys):
for idx, (key, axis) in enumerate(zip(keys, self.hdu.shape)):
if isinstance(key, slice):
ks = range(*key.indices(axis))
break
elif isiterable(key):
# Handle both integer and boolean arrays.
ks = np.arange(axis, dtype=int)[key]
break
# This should always break at some point if _getdata is called.
data = [self[keys[:idx] + (k,) + keys[idx + 1 :]] for k in ks]
if any(isinstance(key, slice) or isiterable(key) for key in keys[idx + 1 :]):
# data contains multidimensional arrays; combine them.
return np.array(data)
else:
# Only singleton dimensions remain; concatenate in a 1D array.
return np.concatenate([np.atleast_1d(array) for array in data])
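# --- Usage sketch (illustrative, not part of the original module) ---------
# Read a small cutout of a large image through the `section` property so
# that only the bytes covering the slice are read.  Path is a placeholder.
def _example_read_cutout(path="large_image.fits"):
    from astropy.io import fits

    with fits.open(path) as hdul:
        cutout = hdul[0].section[100:200, 250:350]
    return cutout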
class PrimaryHDU(_ImageBaseHDU):
"""
FITS primary HDU class.
"""
_default_name = "PRIMARY"
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
ignore_blank=False,
uint=True,
scale_back=None,
):
"""
Construct a primary HDU.
Parameters
----------
data : array or ``astropy.io.fits.hdu.base.DELAYED``, optional
The data in the HDU.
header : `~astropy.io.fits.Header`, optional
The header to be used (as a template). If ``header`` is `None`, a
minimal header will be provided.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
ignore_blank : bool, optional
If `True`, the BLANK header keyword will be ignored if present.
Otherwise, pixels equal to this value will be replaced with
NaNs. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
"""
super().__init__(
data=data,
header=header,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
ignore_blank=ignore_blank,
scale_back=scale_back,
)
# insert the keywords EXTEND
if header is None:
dim = self._header["NAXIS"]
if dim == 0:
dim = ""
self._header.set("EXTEND", True, after="NAXIS" + str(dim))
@classmethod
def match_header(cls, header):
card = header.cards[0]
# Due to problems discussed in #5808, we cannot assume the 'GROUPS'
# keyword to be True/False, have to check the value
return (
card.keyword == "SIMPLE"
and ("GROUPS" not in header or header["GROUPS"] != True) # noqa: E712
and card.value
)
def update_header(self):
super().update_header()
# Update the position of the EXTEND keyword if it already exists
if "EXTEND" in self._header:
if len(self._axes):
after = "NAXIS" + str(len(self._axes))
else:
after = "NAXIS"
self._header.set("EXTEND", after=after)
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
# The EXTEND keyword is only mandatory if the HDU has extensions; this
# condition is checked by the HDUList object. However, if we already
# have an EXTEND keyword check that its position is correct
if "EXTEND" in self._header:
naxis = self._header.get("NAXIS", 0)
self.req_cards(
"EXTEND", naxis + 3, lambda v: isinstance(v, bool), True, option, errs
)
return errs
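# --- Usage sketch (illustrative, not part of the original module) ---------
# Build a minimal FITS file from an array; PrimaryHDU fills in the
# SIMPLE/BITPIX/NAXISn/EXTEND cards itself.  Path is a placeholder.
def _example_build_primary(path="new_image.fits"):
    import numpy as np
    from astropy.io import fits

    data = np.arange(12, dtype=np.float32).reshape(3, 4)
    hdu = fits.PrimaryHDU(data=data)
    hdu.header["OBSERVER"] = "example"  # arbitrary additional keyword
    fits.HDUList([hdu]).writeto(path, overwrite=True)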
class ImageHDU(_ImageBaseHDU, ExtensionHDU):
"""
FITS image extension HDU class.
"""
_extension = "IMAGE"
def __init__(
self,
data=None,
header=None,
name=None,
do_not_scale_image_data=False,
uint=True,
scale_back=None,
ver=None,
):
"""
Construct an image HDU.
Parameters
----------
data : array
The data in the HDU.
header : `~astropy.io.fits.Header`
The header to be used (as a template). If ``header`` is
`None`, a minimal header will be provided.
name : str, optional
The name of the HDU, will be the value of the keyword
``EXTNAME``.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
"""
# This __init__ currently does nothing differently from the base class,
# and is only explicitly defined for the docstring.
super().__init__(
data=data,
header=header,
name=name,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
scale_back=scale_back,
ver=ver,
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension == cls._extension
def _verify(self, option="warn"):
"""
ImageHDU verify method.
"""
errs = super()._verify(option=option)
naxis = self._header.get("NAXIS", 0)
# PCOUNT must == 0, GCOUNT must == 1; the former is verified in
# ExtensionHDU._verify, however ExtensionHDU._verify allows PCOUNT
# to be >= 0, so we need to check it here
self.req_cards(
"PCOUNT", naxis + 3, lambda v: (_is_int(v) and v == 0), 0, option, errs
)
return errs
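# --- Usage sketch (illustrative, not part of the original module) ---------
# Append a named image extension; EXTNAME/EXTVER come from the `name` and
# `ver` arguments and the HDU can then be looked up by (name, ver).
def _example_named_extension():
    import numpy as np
    from astropy.io import fits

    sci = fits.ImageHDU(data=np.zeros((2, 2), dtype=np.float32),
                        name="SCI", ver=1)
    hdul = fits.HDUList([fits.PrimaryHDU(), sci])
    return hdul["SCI", 1]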
class _IndexInfo:
def __init__(self, indx, naxis):
if _is_int(indx):
if indx < 0: # support negative indexing
indx = indx + naxis
if 0 <= indx < naxis:
self.npts = 1
self.offset = indx
self.contiguous = True
else:
raise IndexError(f"Index {indx} out of range.")
elif isinstance(indx, slice):
start, stop, step = indx.indices(naxis)
self.npts = (stop - start) // step
self.offset = start
self.contiguous = step == 1
elif isiterable(indx):
self.npts = len(indx)
self.offset = 0
self.contiguous = False
else:
raise IndexError(f"Illegal index {indx}")
|